Dataset schema (column: type, value range):
status: string, 1 distinct value
repo_name: string, 31 distinct values
repo_url: string, 31 distinct values
issue_id: int64, 1 to 104k
title: string, length 4 to 369
body: string, length 0 to 254k
issue_url: string, length 37 to 56
pull_url: string, length 37 to 54
before_fix_sha: string, length 40
after_fix_sha: string, length 40
report_datetime: unknown
language: string, 5 distinct values
commit_datetime: unknown
updated_file: string, length 4 to 188
file_content: string, length 0 to 5.12M
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
4,385
[Improvement][recording data] Manually configure the parallelism of supplementary runs when recording data
*I ran into a situation where memory fills up and the DolphinScheduler server goes down when I start recording data (补数, i.e. complement data) tasks; it would help if we could manually configure the parallelism of supplementary recording-data runs.* **Describe the question** Memory fills up and the DolphinScheduler server goes down when I start recording data (补数) tasks. **What are the current deficiencies and the benefits of improvement** If the parallelism of supplementary recording-data runs could be configured manually, machine resources would not be occupied so heavily. **Which version of DolphinScheduler:** -[1.3.3-preview] **Describe alternatives you've considered** Changing the mode to "Serial" (串行), but that mode takes too much time.
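The ask here is a middle ground between the "Serial" mode and fully parallel complement runs: a user-configurable cap on how many schedule dates execute at once. Below is a minimal sketch of that idea using a fixed-size thread pool as the bound; `BoundedComplementRunner` and `runProcessInstance` are hypothetical names for illustration, not DolphinScheduler APIs, and the real change is whatever the linked PR implements.

```java
import java.time.LocalDate;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: cap how many complement-data (补数) process instances
// run concurrently by submitting one run per schedule date to a fixed-size pool.
public class BoundedComplementRunner {

    private final ExecutorService pool;

    public BoundedComplementRunner(int parallelism) {
        // "parallelism" stands in for the manually configured value the issue asks for
        this.pool = Executors.newFixedThreadPool(parallelism);
    }

    public void runComplement(List<LocalDate> scheduleDates) throws InterruptedException {
        for (LocalDate date : scheduleDates) {
            pool.submit(() -> runProcessInstance(date));
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.DAYS);
    }

    // stand-in for starting one process instance for one schedule date
    private void runProcessInstance(LocalDate scheduleDate) {
        System.out.println("complement run for " + scheduleDate);
    }

    public static void main(String[] args) throws InterruptedException {
        List<LocalDate> dates = List.of(
                LocalDate.of(2021, 1, 1),
                LocalDate.of(2021, 1, 2),
                LocalDate.of(2021, 1, 3));
        // at most two schedule dates run at the same time
        new BoundedComplementRunner(2).runComplement(dates);
    }
}
```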
https://github.com/apache/dolphinscheduler/issues/4385
https://github.com/apache/dolphinscheduler/pull/5912
2afa625a753680313e8a6c5fb3a68e01e56f5caa
1887bde1ebf8249de153890e78d449582e3eaf4a
"2021-01-06T02:58:47Z"
java
"2021-08-16T06:59:28Z"
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ export default { 'User Name': '用户名', 'Please enter user name': '请输入用户名', Password: '密码', 'Please enter your password': '请输入密码', 'Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22': '密码至少包含数字,字母和字符的两种组合,长度在6-22之间', Login: '登录', Home: '首页', 'Failed to create node to save': '未创建节点保存失败', 'Global parameters': '全局参数', 'Local parameters': '局部参数', 'Copy success': '复制成功', 'The browser does not support automatic copying': '该浏览器不支持自动复制', 'Whether to save the DAG graph': '是否保存DAG图', 'Current node settings': '当前节点设置', 'View history': '查看历史', 'View log': '查看日志', 'Force success': '强制成功', 'Enter this child node': '进入该子节点', 'Node name': '节点名称', 'Please enter name (required)': '请输入名称(必填)', 'Run flag': '运行标志', Normal: '正常', 'Prohibition execution': '禁止执行', 'Please enter description': '请输入描述', 'Number of failed retries': '失败重试次数', Times: '次', 'Failed retry interval': '失败重试间隔', Minute: '分', 'Delay execution time': '延时执行时间', 'Delay execution': '延时执行', 'Forced success': '强制成功', Cancel: '取消', 'Confirm add': '确认添加', 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process': '新创建子工作流还未执行,不能进入子工作流', 'The task has not been executed and cannot enter the sub-Process': '该任务还未执行,不能进入子工作流', 'Name already exists': '名称已存在请重新输入', 'Download Log': '下载日志', 'Refresh Log': '刷新日志', 'Enter full screen': '进入全屏', 'Cancel full screen': '取消全屏', Close: '关闭', 'Update log success': '更新日志成功', 'No more logs': '暂无更多日志', 'No log': '暂无日志', 'Loading Log...': '正在努力请求日志中...', 'Set the DAG diagram name': '设置DAG图名称', 'Please enter description(optional)': '请输入描述(选填)', 'Set global': '设置全局', 'Whether to go online the process definition': '是否上线流程定义', 'Whether to update the process definition': '是否更新流程定义', Add: '添加', 'DAG graph name cannot be empty': 'DAG图名称不能为空', 'Create Datasource': '创建数据源', 'Project Home': '工作流监控', 'Project Manage': '项目管理', 'Create Project': '创建项目', 'Cron Manage': '定时管理', 'Copy Workflow': '复制工作流', 'Tenant Manage': '租户管理', 'Create Tenant': '创建租户', 'User Manage': '用户管理', 'Create User': '创建用户', 'User Information': '用户信息', 'Edit Password': '密码修改', Success: '成功', Failed: '失败', Delete: '删除', 'Please choose': '请选择', 'Please enter a positive integer': '请输入正整数', 'Program Type': '程序类型', 'Main Class': '主函数的Class', 'Main Jar Package': '主Jar包', 'Please enter main jar package': '请选择主Jar包', 'Please enter main class': '请填写主函数的Class', 'Main Arguments': '主程序参数', 'Please enter main arguments': '请输入主程序参数', 'Option Parameters': '选项参数', 'Please enter option parameters': '请输入选项参数', Resources: '资源', 'Custom Parameters': '自定义参数', 'Custom template': '自定义模版', Datasource: '数据源', methods: '方法', 'Please enter the procedure method': '请输入存储脚本 \n\n调用存储过程:{call <procedure-name>[(<arg1>,<arg2>, 
...)]}\n\n调用存储函数:{?= call <procedure-name>[(<arg1>,<arg2>, ...)]} ', 'The procedure method script example': '示例:{call <procedure-name>[(?,?, ...)]} 或 {?= call <procedure-name>[(?,?, ...)]}', Script: '脚本', 'Please enter script(required)': '请输入脚本(必填)', 'Deploy Mode': '部署方式', 'Driver Cores': 'Driver核心数', 'Please enter Driver cores': '请输入Driver核心数', 'Driver Memory': 'Driver内存数', 'Please enter Driver memory': '请输入Driver内存数', 'Executor Number': 'Executor数量', 'Please enter Executor number': '请输入Executor数量', 'The Executor number should be a positive integer': 'Executor数量为正整数', 'Executor Memory': 'Executor内存数', 'Please enter Executor memory': '请输入Executor内存数', 'Executor Cores': 'Executor核心数', 'Please enter Executor cores': '请输入Executor核心数', 'Memory should be a positive integer': '内存数为数字', 'Core number should be positive integer': '核心数为正整数', 'Flink Version': 'Flink版本', 'JobManager Memory': 'JobManager内存数', 'Please enter JobManager memory': '请输入JobManager内存数', 'TaskManager Memory': 'TaskManager内存数', 'Please enter TaskManager memory': '请输入TaskManager内存数', 'Slot Number': 'Slot数量', 'Please enter Slot number': '请输入Slot数量', Parallelism: '并行度', 'Please enter Parallelism': '请输入并行度', 'TaskManager Number': 'TaskManager数量', 'Please enter TaskManager number': '请输入TaskManager数量', 'App Name': '任务名称', 'Please enter app name(optional)': '请输入任务名称(选填)', 'SQL Type': 'sql类型', 'Send Email': '发送邮件', 'Log display': '日志显示', 'rows of result': '行查询结果', 'Max Numbers Return': '返回的记录行数', 'Max Numbers Return placeholder': '默认值10000,如果值过大可能会对内存造成较大压力', 'Max Numbers Return required': '返回的记录行数值必须是一个在0-2147483647范围内的整数', Title: '主题', 'Please enter the title of email': '请输入邮件主题', Table: '表名', TableMode: '表格', Attachment: '附件', 'SQL Parameter': 'sql参数', 'SQL Statement': 'sql语句', 'UDF Function': 'UDF函数', 'Please enter a SQL Statement(required)': '请输入sql语句(必填)', 'Please enter a JSON Statement(required)': '请输入json语句(必填)', 'One form or attachment must be selected': '表格、附件必须勾选一个', 'Mail subject required': '邮件主题必填', 'Child Node': '子节点', 'Please select a sub-Process': '请选择子工作流', Edit: '编辑', 'Switch To This Version': '切换到该版本', 'Datasource Name': '数据源名称', 'Please enter datasource name': '请输入数据源名称', IP: 'IP主机名', 'Please enter IP': '请输入IP主机名', Port: '端口', 'Please enter port': '请输入端口', 'Database Name': '数据库名', 'Please enter database name': '请输入数据库名', 'Oracle Connect Type': '服务名或SID', 'Oracle Service Name': '服务名', 'Oracle SID': 'SID', 'jdbc connect parameters': 'jdbc连接参数', 'Test Connect': '测试连接', 'Please enter resource name': '请输入数据源名称', 'Please enter resource folder name': '请输入资源文件夹名称', 'Please enter a non-query SQL statement': '请输入非查询sql语句', 'Please enter IP/hostname': '请输入IP/主机名', 'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式', '#': '编号', 'Datasource Type': '数据源类型', 'Datasource Parameter': '数据源参数', 'Create Time': '创建时间', 'Update Time': '更新时间', Operation: '操作', 'Current Version': '当前版本', 'Click to view': '点击查看', 'Delete?': '确定删除吗?', 'Switch Version Successfully': '切换版本成功', 'Confirm Switch To This Version?': '确定切换到该版本吗?', Confirm: '确定', 'Task status statistics': '任务状态统计', Number: '数量', State: '状态', 'Process Status Statistics': '流程状态统计', 'Process Definition Statistics': '流程定义统计', 'Project Name': '项目名称', 'Please enter name': '请输入名称', 'Owned Users': '所属用户', 'Process Pid': '进程Pid', 'Zk registration directory': 'zk注册目录', cpuUsage: 'cpuUsage', memoryUsage: 'memoryUsage', 'Last heartbeat time': '最后心跳时间', 'Edit Tenant': '编辑租户', 'OS Tenant Code': '操作系统租户', 'Tenant Name': '租户名称', Queue: '队列', 'Please select a queue': 
'默认为租户关联队列', 'Please enter the os tenant code in English': '请输入操作系统租户只允许英文', 'Please enter os tenant code in English': '请输入英文操作系统租户', 'Please enter os tenant code': '请输入操作系统租户', 'Please enter tenant Name': '请输入租户名称', 'The os tenant code. Only letters or a combination of letters and numbers are allowed': '操作系统租户只允许字母或字母与数字组合', 'Edit User': '编辑用户', Tenant: '租户', Email: '邮件', Phone: '手机', 'User Type': '用户类型', 'Please enter phone number': '请输入手机', 'Please enter email': '请输入邮箱', 'Please enter the correct email format': '请输入正确的邮箱格式', 'Please enter the correct mobile phone format': '请输入正确的手机格式', Project: '项目', Authorize: '授权', 'File resources': '文件资源', 'UDF resources': 'UDF资源', 'UDF resources directory': 'UDF资源目录', 'Please select UDF resources directory': '请选择UDF资源目录', 'Alarm group': '告警组', 'Alarm group required': '告警组必填', 'Edit alarm group': '编辑告警组', 'Create alarm group': '创建告警组', 'Create Alarm Instance': '创建告警实例', 'Edit Alarm Instance': '编辑告警实例', 'Group Name': '组名称', 'Alarm instance name': '告警实例名称', 'Alarm plugin name': '告警插件名称', 'Select plugin': '选择插件', 'Select Alarm plugin': '请选择告警插件', 'Please enter group name': '请输入组名称', 'Instance parameter exception': '实例参数异常', 'Group Type': '组类型', 'Alarm plugin instance': '告警插件实例', 'Select Alarm plugin instance': '请选择告警插件实例', Remarks: '备注', SMS: '短信', 'Managing Users': '管理用户', Permission: '权限', Administrator: '管理员', 'Confirm Password': '确认密码', 'Please enter confirm password': '请输入确认密码', 'Password cannot be in Chinese': '密码不能为中文', 'Please enter a password (6-22) character password': '请输入密码(6-22)字符密码', 'Confirmation password cannot be in Chinese': '确认密码不能为中文', 'Please enter a confirmation password (6-22) character password': '请输入确认密码(6-22)字符密码', 'The password is inconsistent with the confirmation password': '密码与确认密码不一致,请重新确认', 'Please select the datasource': '请选择数据源', 'Please select resources': '请选择资源', Query: '查询', 'Non Query': '非查询', 'prop(required)': 'prop(必填)', 'value(optional)': 'value(选填)', 'value(required)': 'value(必填)', 'prop is empty': 'prop不能为空', 'value is empty': 'value不能为空', 'prop is repeat': 'prop中有重复', 'Start Time': '开始时间', 'End Time': '结束时间', crontab: 'crontab', 'Failure Strategy': '失败策略', online: '上线', offline: '下线', 'Task Status': '任务状态', 'Process Instance': '工作流实例', 'Task Instance': '任务实例', 'Select date range': '选择日期区间', startDate: '开始日期', endDate: '结束日期', Date: '日期', Waiting: '等待', Execution: '执行中', Finish: '完成', 'Create File': '创建文件', 'Create folder': '创建文件夹', 'File Name': '文件名称', 'Folder Name': '文件夹名称', 'File Format': '文件格式', 'Folder Format': '文件夹格式', 'File Content': '文件内容', 'Upload File Size': '文件大小不能超过1G', Create: '创建', 'Please enter the resource content': '请输入资源内容', 'Resource content cannot exceed 3000 lines': '资源内容不能超过3000行', 'File Details': '文件详情', 'Download Details': '下载详情', Return: '返回', Save: '保存', 'File Manage': '文件管理', 'Upload Files': '上传文件', 'Create UDF Function': '创建UDF函数', 'Upload UDF Resources': '上传UDF资源', 'Service-Master': '服务管理-Master', 'Service-Worker': '服务管理-Worker', 'Process Name': '工作流名称', Executor: '执行用户', 'Run Type': '运行类型', 'Scheduling Time': '调度时间', 'Run Times': '运行次数', host: 'host', 'fault-tolerant sign': '容错标识', Rerun: '重跑', 'Recovery Failed': '恢复失败', Stop: '停止', Pause: '暂停', 'Recovery Suspend': '恢复运行', Gantt: '甘特图', 'Node Type': '节点类型', 'Submit Time': '提交时间', Duration: '运行时长', 'Retry Count': '重试次数', 'Task Name': '任务名称', 'Task Date': '任务日期', 'Source Table': '源表', 'Record Number': '记录数', 'Target Table': '目标表', 'Online viewing type is not supported': '不支持在线查看类型', Size: '大小', Rename: '重命名', Download: '下载', Export: 
'导出', 'Version Info': '版本信息', Submit: '提交', 'Edit UDF Function': '编辑UDF函数', type: '类型', 'UDF Function Name': 'UDF函数名称', FILE: '文件', UDF: 'UDF', 'File Subdirectory': '文件子目录', 'Please enter a function name': '请输入函数名', 'Package Name': '包名类名', 'Please enter a Package name': '请输入包名类名', Parameter: '参数', 'Please enter a parameter': '请输入参数', 'UDF Resources': 'UDF资源', 'Upload Resources': '上传资源', Instructions: '使用说明', 'Please enter a instructions': '请输入使用说明', 'Please enter a UDF function name': '请输入UDF函数名称', 'Select UDF Resources': '请选择UDF资源', 'Class Name': '类名', 'Jar Package': 'jar包', 'Library Name': '库名', 'UDF Resource Name': 'UDF资源名称', 'File Size': '文件大小', Description: '描述', 'Drag Nodes and Selected Items': '拖动节点和选中项', 'Select Line Connection': '选择线条连接', 'Delete selected lines or nodes': '删除选中的线或节点', 'Full Screen': '全屏', Unpublished: '未发布', 'Start Process': '启动工作流', 'Execute from the current node': '从当前节点开始执行', 'Recover tolerance fault process': '恢复被容错的工作流', 'Resume the suspension process': '恢复运行流程', 'Execute from the failed nodes': '从失败节点开始执行', 'Complement Data': '补数', 'Scheduling execution': '调度执行', 'Recovery waiting thread': '恢复等待线程', 'Submitted successfully': '提交成功', Executing: '正在执行', 'Ready to pause': '准备暂停', 'Ready to stop': '准备停止', 'Need fault tolerance': '需要容错', Kill: 'Kill', 'Waiting for thread': '等待线程', 'Waiting for dependence': '等待依赖', Start: '运行', Copy: '复制节点', 'Copy name': '复制名称', 'Copy path': '复制路径', 'Please enter keyword': '请输入关键词', 'File Upload': '文件上传', 'Drag the file into the current upload window': '请将文件拖拽到当前上传窗口内!', 'Drag area upload': '拖动区域上传', Upload: '上传', 'ReUpload File': '重新上传文件', 'Please enter file name': '请输入文件名', 'Please select the file to upload': '请选择要上传的文件', 'Resources manage': '资源中心', Security: '安全中心', Logout: '退出', 'No data': '查询无数据', 'Uploading...': '文件上传中', 'Loading...': '正在努力加载中...', List: '列表', 'Unable to download without proper url': '无下载url无法下载', Process: '工作流', 'Process definition': '工作流定义', 'Task record': '任务记录', 'Warning group manage': '告警组管理', 'Warning instance manage': '告警实例管理', 'Servers manage': '服务管理', 'UDF manage': 'UDF管理', 'Resource manage': '资源管理', 'Function manage': '函数管理', 'Edit password': '修改密码', 'Ordinary users': '普通用户', 'Create process': '创建工作流', 'Import process': '导入工作流', 'Timing state': '定时状态', Timing: '定时', Timezone: '时区', TreeView: '树形图', 'Mailbox already exists! 
Recipients and copyers cannot repeat': '邮箱已存在!收件人和抄送人不能重复', 'Mailbox input is illegal': '邮箱输入不合法', 'Please set the parameters before starting': '启动前请先设置参数', Continue: '继续', End: '结束', 'Node execution': '节点执行', 'Backward execution': '向后执行', 'Forward execution': '向前执行', 'Execute only the current node': '仅执行当前节点', 'Notification strategy': '通知策略', 'Notification group': '通知组', 'Please select a notification group': '请选择通知组', receivers: '收件人', receiverCcs: '抄送人', 'Whether it is a complement process?': '是否补数', 'Schedule date': '调度日期', 'Mode of execution': '执行方式', 'Serial execution': '串行执行', 'Parallel execution': '并行执行', 'Set parameters before timing': '定时前请先设置参数', 'Start and stop time': '起止时间', 'Please select time': '请选择时间', 'Please enter crontab': '请输入crontab', none_1: '都不发', success_1: '成功发', failure_1: '失败发', All_1: '成功或失败都发', Toolbar: '工具栏', 'View variables': '查看变量', 'Format DAG': '格式化DAG', 'Refresh DAG status': '刷新DAG状态', Return_1: '返回上一节点', 'Please enter format': '请输入格式为', 'connection parameter': '连接参数', 'Process definition details': '流程定义详情', 'Create process definition': '创建流程定义', 'Scheduled task list': '定时任务列表', 'Process instance details': '流程实例详情', 'Create Resource': '创建资源', 'User Center': '用户中心', AllStatus: '全部状态', None: '无', Name: '名称', 'Process priority': '流程优先级', 'Task priority': '任务优先级', 'Task timeout alarm': '任务超时告警', 'Timeout strategy': '超时策略', 'Timeout alarm': '超时告警', 'Timeout failure': '超时失败', 'Timeout period': '超时时长', 'Waiting Dependent complete': '等待依赖完成', 'Waiting Dependent start': '等待依赖启动', 'Check interval': '检查间隔', 'Timeout must be longer than check interval': '超时时间必须比检查间隔长', 'Timeout strategy must be selected': '超时策略必须选一个', 'Timeout must be a positive integer': '超时时长必须为正整数', 'Add dependency': '添加依赖', and: '且', or: '或', month: '月', week: '周', day: '日', hour: '时', Running: '正在运行', 'Waiting for dependency to complete': '等待依赖完成', Selected: '已选', CurrentHour: '当前小时', Last1Hour: '前1小时', Last2Hours: '前2小时', Last3Hours: '前3小时', Last24Hours: '前24小时', today: '今天', Last1Days: '昨天', Last2Days: '前两天', Last3Days: '前三天', Last7Days: '前七天', ThisWeek: '本周', LastWeek: '上周', LastMonday: '上周一', LastTuesday: '上周二', LastWednesday: '上周三', LastThursday: '上周四', LastFriday: '上周五', LastSaturday: '上周六', LastSunday: '上周日', ThisMonth: '本月', LastMonth: '上月', LastMonthBegin: '上月初', LastMonthEnd: '上月末', 'Refresh status succeeded': '刷新状态成功', 'Queue manage': 'Yarn 队列管理', 'Create queue': '创建队列', 'Edit queue': '编辑队列', 'Datasource manage': '数据源中心', 'History task record': '历史任务记录', 'Please go online': '不要忘记上线', 'Queue value': '队列值', 'Please enter queue value': '请输入队列值', 'Worker group manage': 'Worker分组管理', 'Create worker group': '创建Worker分组', 'Edit worker group': '编辑Worker分组', 'Token manage': '令牌管理', 'Create token': '创建令牌', 'Edit token': '编辑令牌', Addresses: '地址', 'Worker Addresses': 'Worker地址', 'Please select the worker addresses': '请选择Worker地址', 'Failure time': '失效时间', 'Expiration time': '失效时间', User: '用户', 'Please enter token': '请输入令牌', 'Generate token': '生成令牌', Monitor: '监控中心', Group: '分组', 'Queue statistics': '队列统计', 'Command status statistics': '命令状态统计', 'Task kill': '等待kill任务', 'Task queue': '等待执行任务', 'Error command count': '错误指令数', 'Normal command count': '正确指令数', Manage: '管理', 'Number of connections': '连接数', Sent: '发送量', Received: '接收量', 'Min latency': '最低延时', 'Avg latency': '平均延时', 'Max latency': '最大延时', 'Node count': '节点数', 'Query time': '当前查询时间', 'Node self-test status': '节点自检状态', 'Health status': '健康状态', 'Max connections': '最大连接数', 'Threads connections': '当前连接数', 'Max used connections': '同时使用连接最大数', 
'Threads running connections': '数据库当前活跃连接数', 'Worker group': 'Worker分组', 'Please enter a positive integer greater than 0': '请输入大于 0 的正整数', 'Pre Statement': '前置sql', 'Post Statement': '后置sql', 'Statement cannot be empty': '语句不能为空', 'Process Define Count': '工作流定义数', 'Process Instance Running Count': '正在运行的流程数', 'command number of waiting for running': '待执行的命令数', 'failure command number': '执行失败的命令数', 'tasks number of waiting running': '待运行任务数', 'task number of ready to kill': '待杀死任务数', 'Statistics manage': '统计管理', statistics: '统计', 'select tenant': '选择租户', 'Please enter Principal': '请输入Principal', 'Please enter the kerberos authentication parameter java.security.krb5.conf': '请输入kerberos认证参数 java.security.krb5.conf', 'Please enter the kerberos authentication parameter login.user.keytab.username': '请输入kerberos认证参数 login.user.keytab.username', 'Please enter the kerberos authentication parameter login.user.keytab.path': '请输入kerberos认证参数 login.user.keytab.path', 'The start time must not be the same as the end': '开始时间和结束时间不能相同', 'Startup parameter': '启动参数', 'Startup type': '启动类型', 'warning of timeout': '超时告警', 'Next five execution times': '接下来五次执行时间', 'Execute time': '执行时间', 'Complement range': '补数范围', 'Http Url': '请求地址', 'Http Method': '请求类型', 'Http Parameters': '请求参数', 'Http Parameters Key': '参数名', 'Http Parameters Position': '参数位置', 'Http Parameters Value': '参数值', 'Http Check Condition': '校验条件', 'Http Condition': '校验内容', 'Please Enter Http Url': '请填写请求地址(必填)', 'Please Enter Http Condition': '请填写校验内容', 'There is no data for this period of time': '该时间段无数据', 'Worker addresses cannot be empty': 'Worker地址不能为空', 'Please generate token': '请生成Token', 'Please Select token': '请选择Token失效时间', 'Spark Version': 'Spark版本', TargetDataBase: '目标库', TargetTable: '目标表', 'Please enter the table of target': '请输入目标表名', 'Please enter a Target Table(required)': '请输入目标表(必填)', SpeedByte: '限流(字节数)', SpeedRecord: '限流(记录数)', '0 means unlimited by byte': 'KB,0代表不限制', '0 means unlimited by count': '0代表不限制', 'Modify User': '修改用户', 'Whether directory': '是否文件夹', Yes: '是', No: '否', 'Hadoop Custom Params': 'Hadoop参数', 'Sqoop Advanced Parameters': 'Sqoop参数', 'Sqoop Job Name': '任务名称', 'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)', 'Please enter Mysql Table(required)': '请输入Mysql表名(必填)', 'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开', 'Please enter Target Dir(required)': '请输入目标路径(必填)', 'Please enter Export Dir(required)': '请输入数据源路径(必填)', 'Please enter Hive Database(required)': '请输入Hive数据库(必填)', 'Please enter Hive Table(required)': '请输入Hive表名(必填)', 'Please enter Hive Partition Keys': '请输入分区键', 'Please enter Hive Partition Values': '请输入分区值', 'Please enter Replace Delimiter': '请输入替换分隔符', 'Please enter Fields Terminated': '请输入列分隔符', 'Please enter Lines Terminated': '请输入行分隔符', 'Please enter Concurrency': '请输入并发度', 'Please enter Update Key': '请输入更新列', 'Please enter Job Name(required)': '请输入任务名称(必填)', 'Please enter Custom Shell(required)': '请输入自定义脚本', Direct: '流向', Type: '类型', ModelType: '模式', ColumnType: '列类型', Database: '数据库', Column: '列', 'Map Column Hive': 'Hive类型映射', 'Map Column Java': 'Java类型映射', 'Export Dir': '数据源路径', 'Hive partition Keys': 'Hive 分区键', 'Hive partition Values': 'Hive 分区值', FieldsTerminated: '列分隔符', LinesTerminated: '行分隔符', IsUpdate: '是否更新', UpdateKey: '更新列', UpdateMode: '更新类型', 'Target Dir': '目标路径', DeleteTargetDir: '是否删除目录', FileType: '保存格式', CompressionCodec: '压缩类型', CreateHiveTable: '是否创建新表', DropDelimiter: '是否删除分隔符', OverWriteSrc: '是否覆盖数据源', ReplaceDelimiter: '替换分隔符', Concurrency: '并发度', Form: 
'表单', OnlyUpdate: '只更新', AllowInsert: '无更新便插入', 'Data Source': '数据来源', 'Data Target': '数据目的', 'All Columns': '全表导入', 'Some Columns': '选择列', 'Branch flow': '分支流转', 'Custom Job': '自定义任务', 'Custom Script': '自定义脚本', 'Cannot select the same node for successful branch flow and failed branch flow': '成功分支流转和失败分支流转不能选择同一个节点', 'Successful branch flow and failed branch flow are required': 'conditions节点成功和失败分支流转必填', 'No resources exist': '不存在资源', 'Please delete all non-existing resources': '请删除所有不存在资源', 'Unauthorized or deleted resources': '未授权或已删除资源', 'Please delete all non-existent resources': '请删除所有未授权或已删除资源', Kinship: '工作流关系', Reset: '重置', KinshipStateActive: '当前选择', KinshipState1: '已上线', KinshipState0: '工作流未上线', KinshipState10: '调度未上线', 'Dag label display control': 'Dag节点名称显隐', Enable: '启用', Disable: '停用', 'The Worker group no longer exists, please select the correct Worker group!': '该Worker分组已经不存在,请选择正确的Worker分组!', 'Please confirm whether the workflow has been saved before downloading': '下载前请确定工作流是否已保存', 'User name length is between 3 and 39': '用户名长度在3~39之间', 'Timeout Settings': '超时设置', 'Connect Timeout': '连接超时', 'Socket Timeout': 'Socket超时', 'Connect timeout be a positive integer': '连接超时必须为数字', 'Socket Timeout be a positive integer': 'Socket超时必须为数字', ms: '毫秒', 'Please Enter Url': '请直接填写地址,例如:127.0.0.1:7077', Master: 'Master', 'Please select the waterdrop resources': '请选择waterdrop配置文件', zkDirectory: 'zk注册目录', 'Directory detail': '查看目录详情', 'Connection name': '连线名', 'Current connection settings': '当前连线设置', 'Please save the DAG before formatting': '格式化前请先保存DAG', 'Batch copy': '批量复制', 'Related items': '关联项目', 'Project name is required': '项目名称必填', 'Batch move': '批量移动', Version: '版本', 'Pre tasks': '前置任务', 'Running Memory': '运行内存', 'Max Memory': '最大内存', 'Min Memory': '最小内存', 'The workflow canvas is abnormal and cannot be saved, please recreate': '该工作流画布异常,无法保存,请重新创建', Info: '提示', 'Datasource userName': '所属用户', 'Resource userName': '所属用户' }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao] When I search for the keyword Modify User, the web UI shows an empty result
**Describe the question** The search function in the project module currently only does fuzzy matching on the workflow name **What are the current deficiencies and the benefits of improvement** - The keyword query is too narrow; we could fuzzy-match on each column name, similar to the YARN web UI **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** Modify the source code to support fuzzy search on each column. If the amount of data is huge, we could consider introducing Elasticsearch as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper"> <sql id="baseSql"> id, code, name, version, release_state, project_code, user_id, description, global_params, flag, locations, connects, warning_group_id, create_time, timeout, tenant_id, update_time </sql> <select id="verifyByDefineName" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> select pd.id, pd.code, pd.name, pd.version, pd.release_state, pd.project_code, pd.user_id, pd.description, pd.global_params, pd.flag, pd.locations, pd.connects, pd.warning_group_id, pd.create_time, pd.timeout, pd.tenant_id, pd.update_time from t_ds_process_definition pd WHERE pd.project_code = #{projectCode} and pd.name = #{processDefinitionName} </select> <delete id="deleteByCode"> delete from t_ds_process_definition where code = #{code} </delete> <select id="queryByCode" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> select <include refid="baseSql"/> from t_ds_process_definition where code = #{code} </select> <select id="queryByCodes" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> select <include refid="baseSql"/> from t_ds_process_definition where 1 = 1 <if test="codes != null and codes.size() != 0"> and code in <foreach collection="codes" index="index" item="i" open="(" separator="," close=")"> #{i} </foreach> </if> </select> <select id="queryByDefineName" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> select pd.id, pd.code, pd.name, pd.version, pd.release_state, pd.project_code, p.id as project_id, pd.user_id, pd.description, pd.global_params, pd.flag, pd.locations, pd.connects, pd.warning_group_id, pd.create_time, pd.timeout, pd.tenant_id, pd.update_time, u.user_name,p.name as project_name,t.tenant_code,q.queue,q.queue_name from t_ds_process_definition pd JOIN t_ds_user u ON pd.user_id = u.id JOIN t_ds_project p ON pd.project_code = p.code JOIN t_ds_tenant t ON t.id = u.tenant_id JOIN t_ds_queue q ON t.queue_id = q.id WHERE p.code = #{projectCode} and pd.name = #{processDefinitionName} </select> <select id="queryDefineListPaging" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> SELECT td.id, td.code, td.name, td.version, td.release_state, td.project_code, td.user_id, td.description, td.global_params, td.flag, td.warning_group_id, td.timeout, td.tenant_id, td.update_time, td.create_time, sc.schedule_release_state, tu.user_name FROM t_ds_process_definition td left join (select process_definition_id,release_state as schedule_release_state from t_ds_schedules group by 
process_definition_id,release_state) sc on sc.process_definition_id = td.id left join t_ds_user tu on td.user_id = tu.id where td.project_code = #{projectCode} <if test=" searchVal != null and searchVal != ''"> and td.name like concat('%', #{searchVal}, '%') </if> <if test=" userId != 0"> and td.user_id = #{userId} </if> order by sc.schedule_release_state desc,td.update_time desc </select> <select id="queryAllDefinitionList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> select <include refid="baseSql"/> from t_ds_process_definition where project_code = #{projectCode} order by create_time desc </select> <select id="queryDefinitionListByTenant" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> select <include refid="baseSql"/> from t_ds_process_definition where tenant_id = #{tenantId} </select> <select id="queryDefinitionListByIdList" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> select <include refid="baseSql"/> from t_ds_process_definition where id in <foreach collection="ids" index="index" item="i" open="(" separator="," close=")"> #{i} </foreach> </select> <select id="countDefinitionGroupByUser" resultType="org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser"> SELECT td.user_id as user_id, tu.user_name as user_name, count(0) as count FROM t_ds_process_definition td JOIN t_ds_user tu on tu.id=td.user_id where 1 = 1 <if test="projectCodes != null and projectCodes.length != 0"> and td.project_code in <foreach collection="projectCodes" index="index" item="i" open="(" separator="," close=")"> #{i} </foreach> </if> group by td.user_id,tu.user_name </select> <select id="queryByDefineId" resultType="org.apache.dolphinscheduler.dao.entity.ProcessDefinition"> SELECT pd.id, pd.code, pd.name, pd.version, pd.release_state, pd.project_code, pd.user_id, pd.description, pd.global_params, pd.flag, pd.locations, pd.connects, pd.warning_group_id, pd.create_time, pd.timeout, pd.tenant_id, pd.update_time, u.user_name,p.name AS project_name FROM t_ds_process_definition pd, t_ds_user u, t_ds_project p WHERE pd.user_id = u.id AND pd.project_code = p.code AND pd.id = #{processDefineId} </select> <select id="listResources" resultType="java.util.HashMap"> SELECT distinct pd.code,td.resource_ids FROM t_ds_process_task_relation ptr join t_ds_process_definition pd on ptr.process_definition_code=pd.code and ptr.process_definition_version = pd.version and ptr.project_code=pd.project_code and pd.release_state = 1 join t_ds_task_definition td on (ptr.pre_task_code=td.code and ptr.pre_task_version=td.version) or (ptr.pre_task_code=td.code and ptr.pre_task_version=td.version) WHERE td.resource_ids is not null and td.resource_ids != '' </select> <select id="listResourcesByUser" resultType="java.util.HashMap"> SELECT distinct pd.code,td.resource_ids FROM t_ds_process_task_relation ptr join t_ds_process_definition pd on ptr.process_definition_code=pd.code and ptr.process_definition_version = pd.version and ptr.project_code=pd.project_code and pd.release_state = 1 join t_ds_task_definition td on (ptr.pre_task_code=td.code and ptr.pre_task_version=td.version) or (ptr.pre_task_code=td.code and ptr.pre_task_version=td.version) WHERE td.resource_ids is not null and td.resource_ids != '' and td.user_id = #{userId} </select> <select id="listProjectIds" resultType="java.lang.Integer"> SELECT DISTINCT(id) as project_id FROM t_ds_project </select> <update id="updateVersionByProcessDefinitionId"> update t_ds_process_definition set version = #{version} where 
id = #{processDefinitionId} </update> <select id="queryHasAssociatedDefinitionByIdAndVersion" resultType="java.lang.Integer"> select id from t_ds_process_definition where id = #{processDefinitionId} and version = #{version} limit 1 </select> </mapper>
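For context on the fix direction: in `queryDefineListPaging` above, `searchVal` is fuzzy-matched only against `td.name`. Matching the keyword across several columns means OR-ing the same LIKE over each searchable column. Here is a minimal JDBC sketch of that pattern, reusing table and column names from the mapper; the choice of extra columns (`description`, `user_name`) is illustrative, and the `Connection` is assumed to be configured elsewhere.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

// Sketch: fuzzy-match one keyword against several columns instead of only
// the workflow name. Table/column names follow ProcessDefinitionMapper.xml.
public class MultiColumnSearch {

    public static void search(Connection conn, long projectCode, String keyword) throws SQLException {
        String sql = "SELECT td.id, td.name, tu.user_name"
                + " FROM t_ds_process_definition td"
                + " LEFT JOIN t_ds_user tu ON td.user_id = tu.id"
                + " WHERE td.project_code = ?"
                + " AND (td.name LIKE ? OR td.description LIKE ? OR tu.user_name LIKE ?)"
                + " ORDER BY td.update_time DESC";
        String pattern = "%" + keyword + "%";
        try (PreparedStatement stmt = conn.prepareStatement(sql)) {
            stmt.setLong(1, projectCode);
            stmt.setString(2, pattern);
            stmt.setString(3, pattern);
            stmt.setString(4, pattern);
            try (ResultSet rs = stmt.executeQuery()) {
                while (rs.next()) {
                    System.out.printf("%d %s (%s)%n",
                            rs.getLong("id"), rs.getString("name"), rs.getString("user_name"));
                }
            }
        }
    }
}
```

With this shape, a search for "Modify User" would also hit definitions owned by a user of that name, which is the behavior the issue asks for.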
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao] When I search for the keyword Modify User, the web UI shows an empty result
**Describe the question** The search function in the project module currently only does fuzzy matching on the workflow name **What are the current deficiencies and the benefits of improvement** - The keyword query is too narrow; we could fuzzy-match on each column name, similar to the YARN web UI **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** Modify the source code to support fuzzy search on each column. If the amount of data is huge, we could consider introducing Elasticsearch as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProjectMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProjectMapper"> <sql id="baseSql"> id, name, code, description, user_id, flag, create_time, update_time </sql> <sql id="baseSqlV2"> ${alias}.id, ${alias}.name, ${alias}.code, ${alias}.description, ${alias}.user_id, ${alias}.flag, ${alias}.create_time, ${alias}.update_time </sql> <select id="queryByCode" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSql"/> from t_ds_project where code = #{code} </select> <select id="queryDetailByCode" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> , u.user_name as user_name from t_ds_project p join t_ds_user u on p.user_id = u.id where p.code = #{projectCode} </select> <select id="queryDetailById" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> , u.user_name as user_name from t_ds_project p join t_ds_user u on p.user_id = u.id where p.id = #{projectId} </select> <select id="queryByName" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> , u.user_name as user_name from t_ds_project p join t_ds_user u on p.user_id = u.id where p.name = #{projectName} limit 1 </select> <select id="queryProjectListPaging" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> , u.user_name as user_name, (SELECT COUNT(*) FROM t_ds_process_definition AS def WHERE def.project_code = p.code) AS def_count, (SELECT COUNT(*) FROM t_ds_process_definition_log def, t_ds_process_instance inst WHERE def.code = inst.process_definition_code and def.version = inst.process_definition_version AND def.project_code = p.code AND inst.state=1 ) as inst_running_count from t_ds_project p left join t_ds_user u on u.id=p.user_id where 1=1 <if test="userId != 0"> and p.id in (select project_id from t_ds_relation_project_user where user_id=#{userId} union select id as project_id from t_ds_project where user_id=#{userId} ) </if> <if test="searchName!=null and searchName != ''"> and p.name like concat('%', #{searchName}, '%') </if> order by p.create_time desc </select> <select id="queryAuthedProjectListByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> from t_ds_project p,t_ds_relation_project_user rel where p.id = rel.project_id and 
rel.user_id= #{userId} </select> <select id="queryRelationProjectListByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSqlV2"> <property name="alias" value="p"/> </include> from t_ds_project p left join t_ds_relation_project_user rel on p.id = rel.project_id where 1=1 <if test="userId != 0 "> and rel.user_id= #{userId} </if> </select> <select id="queryProjectExceptUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSql"/> from t_ds_project where user_id <![CDATA[ <> ]]> #{userId} </select> <select id="queryProjectCreatedByUser" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSql"/> from t_ds_project where user_id = #{userId} </select> <select id="queryProjectCreatedAndAuthorizedByUserId" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select <include refid="baseSql"/> from t_ds_project where id in (select project_id from t_ds_relation_project_user where user_id=#{userId} union select id as project_id from t_ds_project where user_id=#{userId}) </select> <select id="queryProjectWithUserByProcessInstanceId" resultType="org.apache.dolphinscheduler.dao.entity.ProjectUser"> select dp.id project_id, dp.name project_name, u.user_name user_name from t_ds_process_instance di join t_ds_process_definition dpd on di.process_definition_code = dpd.code join t_ds_project dp on dpd.project_code = dp.code join t_ds_user u on dp.user_id = u.id where di.id = #{processInstanceId}; </select> <select id="queryAllProject" resultType="org.apache.dolphinscheduler.dao.entity.Project"> select * from t_ds_project </select> </mapper>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,304
[Improvement][dao] When I search for the keyword Modify User, the web UI shows an empty result
**Describe the question** The search function in the project module currently only does fuzzy matching on the workflow name **What are the current deficiencies and the benefits of improvement** - The keyword query is too narrow; we could fuzzy-match on each column name, similar to the YARN web UI **Which version of DolphinScheduler:** -[1.3.x] -[dev] **Describe alternatives you've considered** Modify the source code to support fuzzy search on each column. If the amount of data is huge, we could consider introducing Elasticsearch as a search engine <img width="814" alt="1618562781(1)" src="https://user-images.githubusercontent.com/22792154/115001364-25d77800-9ed6-11eb-8306-791d95d79d7b.png">
https://github.com/apache/dolphinscheduler/issues/5304
https://github.com/apache/dolphinscheduler/pull/5952
1887bde1ebf8249de153890e78d449582e3eaf4a
04423260a16b03221e4db23f3d99e5d21212fa29
"2021-04-16T09:11:27Z"
java
"2021-08-17T16:53:12Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.task.sql; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.common.datasource.DatasourceUtil; import org.apache.dolphinscheduler.common.enums.DbType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.sql.SqlBinds; import org.apache.dolphinscheduler.common.task.sql.SqlParameters; import org.apache.dolphinscheduler.common.task.sql.SqlType; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.dao.AlertDao; import org.apache.dolphinscheduler.remote.command.alert.AlertSendResponseCommand; import org.apache.dolphinscheduler.server.entity.SQLTaskExecutionContext; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.utils.ParamUtils; import org.apache.dolphinscheduler.server.utils.UDFUtils; import org.apache.dolphinscheduler.server.worker.task.AbstractTask; import org.apache.dolphinscheduler.service.alert.AlertClientService; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import org.slf4j.Logger; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; /** * sql task */ public class SqlTask extends AbstractTask { /** * sql parameters */ private SqlParameters sqlParameters; /** * alert dao */ private AlertDao alertDao; /** * base datasource */ private BaseConnectionParam baseConnectionParam; /** * taskExecutionContext */ private TaskExecutionContext taskExecutionContext; private AlertClientService alertClientService; public SqlTask(TaskExecutionContext taskExecutionContext, Logger logger, AlertClientService alertClientService) { super(taskExecutionContext, logger); this.taskExecutionContext = taskExecutionContext; logger.info("sql task params {}", 
taskExecutionContext.getTaskParams()); this.sqlParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), SqlParameters.class); if (!sqlParameters.checkParameters()) { throw new RuntimeException("sql task params is not valid"); } this.alertClientService = alertClientService; this.alertDao = SpringApplicationContext.getBean(AlertDao.class); } @Override public void handle() throws Exception { // set the name of the current thread String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, taskExecutionContext.getTaskAppId()); Thread.currentThread().setName(threadLoggerInfoName); logger.info("Full sql parameters: {}", sqlParameters); logger.info("sql type : {}, datasource : {}, sql : {} , localParams : {},udfs : {},showType : {},connParams : {},varPool : {} ,query max result limit {}", sqlParameters.getType(), sqlParameters.getDatasource(), sqlParameters.getSql(), sqlParameters.getLocalParams(), sqlParameters.getUdfs(), sqlParameters.getShowType(), sqlParameters.getConnParams(), sqlParameters.getVarPool(), sqlParameters.getLimit()); try { SQLTaskExecutionContext sqlTaskExecutionContext = taskExecutionContext.getSqlTaskExecutionContext(); // get datasource baseConnectionParam = (BaseConnectionParam) DatasourceUtil.buildConnectionParams( DbType.valueOf(sqlParameters.getType()), sqlTaskExecutionContext.getConnectionParams()); // ready to execute SQL and parameter entity Map SqlBinds mainSqlBinds = getSqlAndSqlParamsMap(sqlParameters.getSql()); List<SqlBinds> preStatementSqlBinds = Optional.ofNullable(sqlParameters.getPreStatements()) .orElse(new ArrayList<>()) .stream() .map(this::getSqlAndSqlParamsMap) .collect(Collectors.toList()); List<SqlBinds> postStatementSqlBinds = Optional.ofNullable(sqlParameters.getPostStatements()) .orElse(new ArrayList<>()) .stream() .map(this::getSqlAndSqlParamsMap) .collect(Collectors.toList()); List<String> createFuncs = UDFUtils.createFuncs(sqlTaskExecutionContext.getUdfFuncTenantCodeMap(), logger); // execute sql task executeFuncAndSql(mainSqlBinds, preStatementSqlBinds, postStatementSqlBinds, createFuncs); setExitStatusCode(Constants.EXIT_CODE_SUCCESS); } catch (Exception e) { setExitStatusCode(Constants.EXIT_CODE_FAILURE); logger.error("sql task error: {}", e.toString()); throw e; } } /** * ready to execute SQL and parameter entity Map * * @return SqlBinds */ private SqlBinds getSqlAndSqlParamsMap(String sql) { Map<Integer, Property> sqlParamsMap = new HashMap<>(); StringBuilder sqlBuilder = new StringBuilder(); // combining local and global parameters Map<String, Property> paramsMap = ParamUtils.convert(taskExecutionContext,getParameters()); // spell SQL according to the final user-defined variable if (paramsMap == null) { sqlBuilder.append(sql); return new SqlBinds(sqlBuilder.toString(), sqlParamsMap); } if (StringUtils.isNotEmpty(sqlParameters.getTitle())) { String title = ParameterUtils.convertParameterPlaceholders(sqlParameters.getTitle(), ParamUtils.convert(paramsMap)); logger.info("SQL title : {}", title); sqlParameters.setTitle(title); } //new //replace variable TIME with $[YYYYmmddd...] 
in sql when history run job and batch complement job sql = ParameterUtils.replaceScheduleTime(sql, taskExecutionContext.getScheduleTime()); // special characters need to be escaped, ${} needs to be escaped String rgex = "['\"]*\\$\\{(.*?)\\}['\"]*"; setSqlParamsMap(sql, rgex, sqlParamsMap, paramsMap); //Replace the original value in sql !{...} ,Does not participate in precompilation String rgexo = "['\"]*\\!\\{(.*?)\\}['\"]*"; sql = replaceOriginalValue(sql, rgexo, paramsMap); // replace the ${} of the SQL statement with the Placeholder String formatSql = sql.replaceAll(rgex, "?"); sqlBuilder.append(formatSql); // print repalce sql printReplacedSql(sql, formatSql, rgex, sqlParamsMap); return new SqlBinds(sqlBuilder.toString(), sqlParamsMap); } public String replaceOriginalValue(String content, String rgex, Map<String, Property> sqlParamsMap) { Pattern pattern = Pattern.compile(rgex); while (true) { Matcher m = pattern.matcher(content); if (!m.find()) { break; } String paramName = m.group(1); String paramValue = sqlParamsMap.get(paramName).getValue(); content = m.replaceFirst(paramValue); } return content; } @Override public AbstractParameters getParameters() { return this.sqlParameters; } /** * execute function and sql * * @param mainSqlBinds main sql binds * @param preStatementsBinds pre statements binds * @param postStatementsBinds post statements binds * @param createFuncs create functions */ public void executeFuncAndSql(SqlBinds mainSqlBinds, List<SqlBinds> preStatementsBinds, List<SqlBinds> postStatementsBinds, List<String> createFuncs) throws Exception { Connection connection = null; PreparedStatement stmt = null; ResultSet resultSet = null; try { // create connection connection = DatasourceUtil.getConnection(DbType.valueOf(sqlParameters.getType()), baseConnectionParam); // create temp function if (CollectionUtils.isNotEmpty(createFuncs)) { createTempFunction(connection, createFuncs); } // pre sql preSql(connection, preStatementsBinds); stmt = prepareStatementAndBind(connection, mainSqlBinds); String result = null; // decide whether to executeQuery or executeUpdate based on sqlType if (sqlParameters.getSqlType() == SqlType.QUERY.ordinal()) { // query statements need to be convert to JsonArray and inserted into Alert to send resultSet = stmt.executeQuery(); result = resultProcess(resultSet); } else if (sqlParameters.getSqlType() == SqlType.NON_QUERY.ordinal()) { // non query statement String updateResult = String.valueOf(stmt.executeUpdate()); result = setNonQuerySqlReturn(updateResult, sqlParameters.getLocalParams()); } //deal out params sqlParameters.dealOutParam(result); postSql(connection, postStatementsBinds); } catch (Exception e) { logger.error("execute sql error: {}", e.getMessage()); throw e; } finally { close(resultSet, stmt, connection); } } public String setNonQuerySqlReturn(String updateResult, List<Property> properties) { String result = null; for (Property info :properties) { if (Direct.OUT == info.getDirect()) { List<Map<String,String>> updateRL = new ArrayList<>(); Map<String,String> updateRM = new HashMap<>(); updateRM.put(info.getProp(),updateResult); updateRL.add(updateRM); result = JSONUtils.toJsonString(updateRL); break; } } return result; } /** * result process * * @param resultSet resultSet * @throws Exception Exception */ private String resultProcess(ResultSet resultSet) throws Exception { ArrayNode resultJSONArray = JSONUtils.createArrayNode(); if (resultSet != null) { ResultSetMetaData md = resultSet.getMetaData(); int num = md.getColumnCount(); int 
rowCount = 0; while (rowCount < sqlParameters.getLimit() && resultSet.next()) { ObjectNode mapOfColValues = JSONUtils.createObjectNode(); for (int i = 1; i <= num; i++) { mapOfColValues.set(md.getColumnLabel(i), JSONUtils.toJsonNode(resultSet.getObject(i))); } resultJSONArray.add(mapOfColValues); rowCount++; } int displayRows = sqlParameters.getDisplayRows() > 0 ? sqlParameters.getDisplayRows() : Constants.DEFAULT_DISPLAY_ROWS; displayRows = Math.min(displayRows, resultJSONArray.size()); logger.info("display sql result {} rows as follows:", displayRows); for (int i = 0; i < displayRows; i++) { String row = JSONUtils.toJsonString(resultJSONArray.get(i)); logger.info("row {} : {}", i + 1, row); } } String result = JSONUtils.toJsonString(resultJSONArray); if (sqlParameters.getSendEmail() == null || sqlParameters.getSendEmail()) { sendAttachment(sqlParameters.getGroupId(), StringUtils.isNotEmpty(sqlParameters.getTitle()) ? sqlParameters.getTitle() : taskExecutionContext.getTaskName() + " query result sets", result); } logger.debug("execute sql result : {}", result); return result; } /** * pre sql * * @param connection connection * @param preStatementsBinds preStatementsBinds */ private void preSql(Connection connection, List<SqlBinds> preStatementsBinds) throws Exception { for (SqlBinds sqlBind : preStatementsBinds) { try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) { int result = pstmt.executeUpdate(); logger.info("pre statement execute result: {}, for sql: {}", result, sqlBind.getSql()); } } } /** * post sql * * @param connection connection * @param postStatementsBinds postStatementsBinds */ private void postSql(Connection connection, List<SqlBinds> postStatementsBinds) throws Exception { for (SqlBinds sqlBind : postStatementsBinds) { try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) { int result = pstmt.executeUpdate(); logger.info("post statement execute result: {},for sql: {}", result, sqlBind.getSql()); } } } /** * create temp function * * @param connection connection * @param createFuncs createFuncs */ private void createTempFunction(Connection connection, List<String> createFuncs) throws Exception { try (Statement funcStmt = connection.createStatement()) { for (String createFunc : createFuncs) { logger.info("hive create function sql: {}", createFunc); funcStmt.execute(createFunc); } } } /** * close jdbc resource * * @param resultSet resultSet * @param pstmt pstmt * @param connection connection */ private void close(ResultSet resultSet, PreparedStatement pstmt, Connection connection) { if (resultSet != null) { try { resultSet.close(); } catch (SQLException e) { logger.error("close result set error : {}", e.getMessage(), e); } } if (pstmt != null) { try { pstmt.close(); } catch (SQLException e) { logger.error("close prepared statement error : {}", e.getMessage(), e); } } if (connection != null) { try { connection.close(); } catch (SQLException e) { logger.error("close connection error : {}", e.getMessage(), e); } } } /** * preparedStatement bind * * @param connection connection * @param sqlBinds sqlBinds * @return PreparedStatement * @throws Exception Exception */ private PreparedStatement prepareStatementAndBind(Connection connection, SqlBinds sqlBinds) throws Exception { // is the timeout set boolean timeoutFlag = taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.FAILED || taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.WARNFAILED; PreparedStatement stmt = 
connection.prepareStatement(sqlBinds.getSql()); if (timeoutFlag) { stmt.setQueryTimeout(taskExecutionContext.getTaskTimeout()); } Map<Integer, Property> params = sqlBinds.getParamsMap(); if (params != null) { for (Map.Entry<Integer, Property> entry : params.entrySet()) { Property prop = entry.getValue(); ParameterUtils.setInParameter(entry.getKey(), stmt, prop.getType(), prop.getValue()); } } logger.info("prepare statement replace sql : {} ", stmt); return stmt; } /** * send mail as an attachment * * @param title title * @param content content */ public void sendAttachment(int groupId, String title, String content) { AlertSendResponseCommand alertSendResponseCommand = alertClientService.sendAlert(groupId, title, content); if (!alertSendResponseCommand.getResStatus()) { throw new RuntimeException("send mail failed!"); } } /** * regular expressions match the contents between two specified strings * * @param content content * @param rgex rgex * @param sqlParamsMap sql params map * @param paramsPropsMap params props map */ public void setSqlParamsMap(String content, String rgex, Map<Integer, Property> sqlParamsMap, Map<String, Property> paramsPropsMap) { Pattern pattern = Pattern.compile(rgex); Matcher m = pattern.matcher(content); int index = 1; while (m.find()) { String paramName = m.group(1); Property prop = paramsPropsMap.get(paramName); if (prop == null) { logger.error("setSqlParamsMap: No Property with paramName: {} is found in paramsPropsMap of task instance" + " with id: {}. So couldn't put Property in sqlParamsMap.", paramName, taskExecutionContext.getTaskInstanceId()); } else { sqlParamsMap.put(index, prop); index++; logger.info("setSqlParamsMap: Property with paramName: {} put in sqlParamsMap of content {} successfully.", paramName, content); } } } /** * print replace sql * * @param content content * @param formatSql format sql * @param rgex rgex * @param sqlParamsMap sql params map */ public void printReplacedSql(String content, String formatSql, String rgex, Map<Integer, Property> sqlParamsMap) { //parameter print style logger.info("after replace sql , preparing : {}", formatSql); StringBuilder logPrint = new StringBuilder("replaced sql , parameters:"); if (sqlParamsMap == null) { logger.info("printReplacedSql: sqlParamsMap is null."); } else { for (int i = 1; i <= sqlParamsMap.size(); i++) { logPrint.append(sqlParamsMap.get(i).getValue() + "(" + sqlParamsMap.get(i).getType() + ")"); } } logger.info("Sql Params are {}", logPrint); } }
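The heart of `getSqlAndSqlParamsMap` above is the pair of regexes: `${...}` placeholders are collected into a positional parameter map and rewritten to JDBC `?`, while `!{...}` values are inlined literally and skip precompilation. Below is a self-contained sketch of those three steps using the same patterns as `SqlTask`; the parameter names and values are made up.

```java
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Demonstrates the substitutions SqlTask applies to a SQL template:
// ${name} becomes a positional "?" parameter, !{name} is inlined literally.
public class PlaceholderDemo {

    // same patterns as in SqlTask.getSqlAndSqlParamsMap
    private static final Pattern PARAM = Pattern.compile("['\"]*\\$\\{(.*?)\\}['\"]*");
    private static final Pattern LITERAL = Pattern.compile("['\"]*\\!\\{(.*?)\\}['\"]*");

    public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put("id", "42");
        params.put("table", "t_ds_user");

        String sql = "select * from !{table} where id = ${id}";

        // 1) collect ${...} params in order of appearance (cf. setSqlParamsMap)
        Map<Integer, String> positional = new LinkedHashMap<>();
        Matcher m = PARAM.matcher(sql);
        int index = 1;
        while (m.find()) {
            positional.put(index++, params.get(m.group(1)));
        }

        // 2) inline !{...} values literally (cf. replaceOriginalValue)
        while (true) {
            Matcher lm = LITERAL.matcher(sql);
            if (!lm.find()) {
                break;
            }
            sql = lm.replaceFirst(params.get(lm.group(1)));
        }

        // 3) rewrite ${...} placeholders to JDBC "?" (cf. sql.replaceAll(rgex, "?"))
        String prepared = PARAM.matcher(sql).replaceAll("?");

        System.out.println(prepared);   // select * from t_ds_user where id = ?
        System.out.println(positional); // {1=42}
    }
}
```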
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,024
[Improvement][dist] Remove useless packaging commands
**Describe the question** Remove useless packaging commands **What are the current deficiencies and the benefits of improvement** The common module has no src/main/resources/bin directory, but the dist module still executes the related commands when packaging, which is unreasonable **Which version of DolphinScheduler:** -[1.3.7-release] **Describe alternatives you've considered** Remove the dolphinscheduler-common/src/main/resources/bin directory packaging command from dolphinscheduler-bin.xml in the dist module
https://github.com/apache/dolphinscheduler/issues/6024
https://github.com/apache/dolphinscheduler/pull/6029
7b8579310f5b11c3e8195b85873eaa920d11bc58
67dde65d3207d325d344e472a4be57286a1d379d
"2021-08-23T07:13:29Z"
java
"2021-08-25T11:27:16Z"
dolphinscheduler-dist/src/main/assembly/dolphinscheduler-bin.xml
<!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd"> <id>bin</id> <formats> <format>tar.gz</format> </formats> <includeBaseDirectory>true</includeBaseDirectory> <baseDirectory>${project.build.finalName}-bin</baseDirectory> <fileSets> <fileSet> <directory>${basedir}/../dolphinscheduler-alert/src/main/resources</directory> <includes> <include>**/*.properties</include> <include>**/*.xml</include> <include>**/*.json</include> <include>**/*.ftl</include> </includes> <outputDirectory>conf</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-api/src/main/resources</directory> <includes> <include>**/*.properties</include> <include>**/*.xml</include> <include>**/*.json</include> </includes> <excludes> <exclude>application.properties</exclude> </excludes> <outputDirectory>conf</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-common/src/main/resources</directory> <includes> <include>**/*.properties</include> <include>**/*.xml</include> <include>**/*.json</include> </includes> <outputDirectory>conf</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-common/src/main/resources/bin</directory> <includes> <include>*.*</include> </includes> <directoryMode>755</directoryMode> <outputDirectory>bin</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-dao/src/main/resources</directory> <includes> <include>**/*.properties</include> <include>**/*.xml</include> <include>**/*.json</include> </includes> <excludes> <exclude>org/apache/dolphinscheduler/dao/mapper/*.xml</exclude> </excludes> <outputDirectory>conf</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-server/src/main/resources</directory> <includes> <include>**/*.properties</include> <include>**/*.xml</include> <include>**/*.json</include> <include>config/*.*</include> </includes> <outputDirectory>conf</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-service/src/main/resources</directory> <includes> <include>**/*.properties</include> <include>**/*.xml</include> <include>**/*.json</include> <include>**/*.yml</include> </includes> <outputDirectory>conf</outputDirectory> </fileSet> <fileSet> <directory>src/main/resources</directory> <includes> <include>**/*.properties</include> <include>**/*.xml</include> <include>**/*.json</include> </includes> <outputDirectory>conf</outputDirectory> </fileSet> <fileSet> 
<directory>${basedir}/../dolphinscheduler-server/target/dolphinscheduler-server-${project.version}</directory> <includes> <include>**/*.*</include> </includes> <outputDirectory>.</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-api/target/dolphinscheduler-api-${project.version}</directory> <includes> <include>**/*.*</include> </includes> <outputDirectory>.</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-alert/target/dolphinscheduler-alert-${project.version}</directory> <includes> <include>**/*.*</include> </includes> <outputDirectory>.</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-dist/target/dolphinscheduler-dist-${project.version}</directory> <includes> <include>**/*.*</include> </includes> <outputDirectory>.</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../dolphinscheduler-ui/dist</directory> <includes> <include>**/*.*</include> </includes> <outputDirectory>./ui</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../sql</directory> <includes> <include>**/*</include> </includes> <outputDirectory>./sql</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../script</directory> <includes> <include>*.*</include> </includes> <outputDirectory>./script</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../script</directory> <includes> <include>env/*.*</include> </includes> <outputDirectory>./conf</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/../script</directory> <includes> <include>start-all.sh</include> <include>stop-all.sh</include> <include>dolphinscheduler-daemon.sh</include> <include>status-all.sh</include> </includes> <outputDirectory>./bin</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/.././</directory> <includes> <include>*.sh</include> <include>*.py</include> <include>DISCLAIMER</include> </includes> <outputDirectory>.</outputDirectory> </fileSet> <fileSet> <directory>${basedir}/release-docs</directory> <useDefaultExcludes>true</useDefaultExcludes> <includes> <include>**/*</include> </includes> <outputDirectory>.</outputDirectory> </fileSet> </fileSets> <dependencySets> <dependencySet> <outputDirectory>lib</outputDirectory> <useProjectArtifact>true</useProjectArtifact> <excludes> <exclude>org.apache.dolphinscheduler:dolphinscheduler-dist</exclude> <exclude>javax.servlet:servlet-api</exclude> <exclude>org.eclipse.jetty.aggregate:jetty-all</exclude> <exclude>org.slf4j:slf4j-log4j12</exclude> </excludes> </dependencySet> </dependencySets> </assembly>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
5,908
[Bug][MasterServer] When executing a compensation task, the execution thread would have an NPE
In the latest dev branch, if a process executes as a compensation task, then after the task executes successfully the Master throws an exception that causes the process to show as failed (a minimal sketch of the fix's null-guard appears after this record). ```java [ERROR] 2021-07-28 12:58:55.462 org.apache.dolphinscheduler.server.master.runner.MasterExecThread:[224] - master exec thread exception java.lang.NullPointerException: null at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.executeComplementProcess(MasterExecThread.java:331) at org.apache.dolphinscheduler.server.master.runner.MasterExecThread.run(MasterExecThread.java:218) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ```
https://github.com/apache/dolphinscheduler/issues/5908
https://github.com/apache/dolphinscheduler/pull/5909
67dde65d3207d325d344e472a4be57286a1d379d
2fa3b419a0598c499ae0e9cb39f2402f43718418
"2021-07-28T11:52:52Z"
java
"2021-08-25T14:19:28Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterExecThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.runner; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES; import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP; import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.DependResult; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.NettyRemotingClient; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.service.alert.ProcessAlertManager; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.quartz.cron.CronUtils; import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import 
java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.Lists; /** * master exec thread,split dag */ public class MasterExecThread implements Runnable { /** * logger of MasterExecThread */ private static final Logger logger = LoggerFactory.getLogger(MasterExecThread.class); /** * runing TaskNode */ private final Map<MasterBaseTaskExecThread, Future<Boolean>> activeTaskNode = new ConcurrentHashMap<>(); /** * task exec service */ private final ExecutorService taskExecService; /** * process instance */ private ProcessInstance processInstance; /** * submit failure nodes */ private boolean taskFailedSubmit = false; /** * recover node id list */ private List<TaskInstance> recoverNodeIdList = new ArrayList<>(); /** * error task list */ private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>(); /** * complete task list */ private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>(); /** * ready to submit task queue */ private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue(); /** * depend failed task map */ private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>(); /** * forbidden task map */ private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>(); /** * skip task map */ private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>(); /** * recover tolerance fault task list */ private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>(); /** * alert manager */ private ProcessAlertManager processAlertManager; /** * the object of DAG */ private DAG<String, TaskNode, TaskNodeRelation> dag; /** * process service */ private ProcessService processService; /** * master config */ private MasterConfig masterConfig; /** * */ private NettyRemotingClient nettyRemotingClient; /** * submit post node * * @param parentNodeName parent node name */ private Map<String, Object> propToValue = new ConcurrentHashMap<>(); /** * constructor of MasterExecThread * * @param processInstance processInstance * @param processService processService * @param nettyRemotingClient nettyRemotingClient */ public MasterExecThread(ProcessInstance processInstance , ProcessService processService , NettyRemotingClient nettyRemotingClient , ProcessAlertManager processAlertManager , MasterConfig masterConfig) { this.processService = processService; this.processInstance = processInstance; this.masterConfig = masterConfig; int masterTaskExecNum = masterConfig.getMasterExecTaskNum(); this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread", masterTaskExecNum); this.nettyRemotingClient = nettyRemotingClient; this.processAlertManager = processAlertManager; } @Override public void run() { // process instance is null if (processInstance == null) { logger.info("process instance is not exists"); return; } // check to see if it's done if (processInstance.getState().typeIsFinished()) { logger.info("process instance is done : {}", processInstance.getId()); return; } try { if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) { // sub process complement data executeComplementProcess(); } else { // execute flow executeProcess(); } } catch (Exception e) { logger.error("master exec thread exception", e); 
logger.error("process execute failed, process id:{}", processInstance.getId()); processInstance.setState(ExecutionStatus.FAILURE); processInstance.setEndTime(new Date()); processService.updateProcessInstance(processInstance); } finally { taskExecService.shutdown(); } } /** * execute process * * @throws Exception exception */ private void executeProcess() throws Exception { prepareProcess(); runProcess(); endProcess(); } /** * execute complement process * * @throws Exception exception */ private void executeComplementProcess() throws Exception { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); processService.saveProcessInstance(processInstance); // get schedules int processDefinitionId = processInstance.getProcessDefinition().getId(); List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionId(processDefinitionId); List<Date> listDate = Lists.newLinkedList(); if (!CollectionUtils.isEmpty(schedules)) { for (Schedule schedule : schedules) { listDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedule.getCrontab())); } } // get first fire date Iterator<Date> iterator = null; Date scheduleDate; if (!CollectionUtils.isEmpty(listDate)) { iterator = listDate.iterator(); scheduleDate = iterator.next(); processInstance.setScheduleTime(scheduleDate); processService.updateProcessInstance(processInstance); } else { scheduleDate = processInstance.getScheduleTime(); if (scheduleDate == null) { scheduleDate = startDate; } } while (Stopper.isRunning()) { logger.info("process {} start to complement {} data", processInstance.getId(), DateUtils.dateToString(scheduleDate)); // prepare dag and other info prepareProcess(); if (dag == null) { logger.error("process {} dag is null, please check out parameters", processInstance.getId()); processInstance.setState(ExecutionStatus.SUCCESS); processService.updateProcessInstance(processInstance); return; } // execute process ,waiting for end runProcess(); endProcess(); // process instance failure ,no more complements if (!processInstance.getState().typeIsSuccess()) { logger.info("process {} state {}, complement not completely!", processInstance.getId(), processInstance.getState()); break; } // current process instance success ,next execute if (null == iterator) { // loop by day scheduleDate = DateUtils.getSomeDay(scheduleDate, 1); if (scheduleDate.after(endDate)) { // all success logger.info("process {} complement completely!", processInstance.getId()); break; } } else { // loop by schedule date if (!iterator.hasNext()) { // all success logger.info("process {} complement completely!", processInstance.getId()); break; } scheduleDate = iterator.next(); } // flow end // execute next process instance complement data processInstance.setScheduleTime(scheduleDate); if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processInstance.getProcessDefinition().getGlobalParamMap(), processInstance.getProcessDefinition().getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processInstance.setId(0); 
processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processService.saveProcessInstance(processInstance); } } /** * prepare process parameter * * @throws Exception exception */ private void prepareProcess() throws Exception { // gen process dag buildFlowDag(); // init task queue initTaskQueue(); logger.info("prepare process :{} end", processInstance.getId()); } /** * process end handle */ private void endProcess() { processInstance.setEndTime(new Date()); processService.updateProcessInstance(processInstance); if (processInstance.getState().typeIsWaitingThread()) { processService.createRecoveryWaitingThreadCommand(null, processInstance); } List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId()); ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId()); processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser); } /** * generate process dag * * @throws Exception exception */ private void buildFlowDag() throws Exception { recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam()); List<TaskNode> taskNodeList = processService.genTaskNodeList(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion(), new HashMap<>()); forbiddenTaskList.clear(); taskNodeList.forEach(taskNode -> { if (taskNode.isForbidden()) { forbiddenTaskList.put(taskNode.getName(), taskNode); } }); // generate process to get DAG info List<String> recoveryNameList = getRecoveryNodeNameList(); List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam()); ProcessDag processDag = generateFlowDag(taskNodeList, startNodeNameList, recoveryNameList, processInstance.getTaskDependType()); if (processDag == null) { logger.error("processDag is null"); return; } // generate process dag dag = DagHelper.buildDagGraph(processDag); } /** * init task queue */ private void initTaskQueue() { taskFailedSubmit = false; activeTaskNode.clear(); dependFailedTask.clear(); completeTaskList.clear(); errorTaskList.clear(); List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance task : taskInstanceList) { if (task.isTaskComplete()) { completeTaskList.put(task.getName(), task); } if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) { continue; } if (task.getState().typeIsFailure() && !task.taskCanRetry()) { errorTaskList.put(task.getName(), task); } } } /** * submit task to execute * * @param taskInstance task instance * @return TaskInstance */ private TaskInstance submitTaskExec(TaskInstance taskInstance) { MasterBaseTaskExecThread abstractExecThread = null; if (taskInstance.isSubProcess()) { abstractExecThread = new SubProcessTaskExecThread(taskInstance); } else if (taskInstance.isDependTask()) { abstractExecThread = new DependentTaskExecThread(taskInstance); } else if (taskInstance.isConditionsTask()) { abstractExecThread = new ConditionsTaskExecThread(taskInstance); } else if (taskInstance.isSwitchTask()) { abstractExecThread = new SwitchTaskExecThread(taskInstance); } else { abstractExecThread = new MasterTaskExecThread(taskInstance); } Future<Boolean> future = taskExecService.submit(abstractExecThread); activeTaskNode.putIfAbsent(abstractExecThread, future); return abstractExecThread.getTaskInstance(); } /** * find task instance in db. * in case submit more than one same name task in the same time. 
* * @param taskCode task code * @param taskVersion task version * @return TaskInstance */ private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) { List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) { return taskInstance; } } return null; } /** * encapsulation task * * @param processInstance process instance * @param taskNode taskNode * @return TaskInstance */ private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) { TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion()); if (taskInstance == null) { taskInstance = new TaskInstance(); taskInstance.setTaskCode(taskNode.getCode()); taskInstance.setTaskDefinitionVersion(taskNode.getVersion()); // task name taskInstance.setName(taskNode.getName()); // task instance state taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); // process instance id taskInstance.setProcessInstanceId(processInstance.getId()); // task instance type taskInstance.setTaskType(taskNode.getType().toUpperCase()); // task instance whether alert taskInstance.setAlertFlag(Flag.NO); // task instance start time taskInstance.setStartTime(null); // task instance flag taskInstance.setFlag(Flag.YES); // task instance retry times taskInstance.setRetryTimes(0); // max task instance retry times taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes()); // retry task instance interval taskInstance.setRetryInterval(taskNode.getRetryInterval()); //set task param taskInstance.setTaskParams(taskNode.getTaskParams()); // task instance priority if (taskNode.getTaskInstancePriority() == null) { taskInstance.setTaskInstancePriority(Priority.MEDIUM); } else { taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority()); } String processWorkerGroup = processInstance.getWorkerGroup(); processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup; String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup(); if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) { taskInstance.setWorkerGroup(processWorkerGroup); } else { taskInstance.setWorkerGroup(taskWorkerGroup); } // delay execution time taskInstance.setDelayTime(taskNode.getDelayTime()); } return taskInstance; } public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) { Map<String,Property> allProperty = new HashMap<>(); Map<String,TaskInstance> allTaskInstance = new HashMap<>(); if (CollectionUtils.isNotEmpty(preTask)) { for (String preTaskName : preTask) { TaskInstance preTaskInstance = completeTaskList.get(preTaskName); if (preTaskInstance == null) { continue; } String preVarPool = preTaskInstance.getVarPool(); if (StringUtils.isNotEmpty(preVarPool)) { List<Property> properties = JSONUtils.toList(preVarPool, Property.class); for (Property info : properties) { setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info); } } } if (allProperty.size() > 0) { taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values())); } } } private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) { //for this taskInstance all the param in this part is IN. 
thisProperty.setDirect(Direct.IN); //get the pre taskInstance Property's name String proName = thisProperty.getProp(); //if the Previous nodes have the Property of same name if (allProperty.containsKey(proName)) { //comparison the value of two Property Property otherPro = allProperty.get(proName); //if this property'value of loop is empty,use the other,whether the other's value is empty or not if (StringUtils.isEmpty(thisProperty.getValue())) { allProperty.put(proName, otherPro); //if property'value of loop is not empty,and the other's value is not empty too, use the earlier value } else if (StringUtils.isNotEmpty(otherPro.getValue())) { TaskInstance otherTask = allTaskInstance.get(proName); if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) { allProperty.put(proName, thisProperty); allTaskInstance.put(proName,preTaskInstance); } else { allProperty.put(proName, otherPro); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName,preTaskInstance); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName,preTaskInstance); } } private void submitPostNode(String parentNodeName) { Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeName, skipTaskNodeList, dag, completeTaskList); List<TaskInstance> taskInstances = new ArrayList<>(); for (String taskNode : submitTaskNodeList) { TaskNode taskNodeObject = dag.getNode(taskNode); taskInstances.add(createTaskInstance(processInstance, taskNodeObject)); } // if previous node success , post node submit for (TaskInstance task : taskInstances) { if (readyToSubmitTaskQueue.contains(task)) { continue; } if (completeTaskList.containsKey(task.getName())) { logger.info("task {} has already run success", task.getName()); continue; } if (task.getState().typeIsPause() || task.getState().typeIsCancel()) { logger.info("task {} stopped, the state is {}", task.getName(), task.getState()); } else { addTaskToStandByList(task); } } } /** * determine whether the dependencies of the task node are complete * * @return DependResult */ private DependResult isTaskDepsComplete(String taskName) { Collection<String> startNodes = dag.getBeginNode(); // if vertex,returns true directly if (startNodes.contains(taskName)) { return DependResult.SUCCESS; } TaskNode taskNode = dag.getNode(taskName); List<String> depNameList = taskNode.getDepList(); for (String depsNode : depNameList) { if (!dag.containsNode(depsNode) || forbiddenTaskList.containsKey(depsNode) || skipTaskNodeList.containsKey(depsNode)) { continue; } // dependencies must be fully completed if (!completeTaskList.containsKey(depsNode)) { return DependResult.WAITING; } ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState(); if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) { return DependResult.NON_EXEC; } // ignore task state if current task is condition if (taskNode.isConditionsTask()) { continue; } if (!dependTaskSuccess(depsNode, taskName)) { return DependResult.FAILED; } } logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray())); return DependResult.SUCCESS; } /** * depend node is completed, but here need check the condition task branch is the next node */ private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) { if (dag.getNode(dependNodeName).isConditionsTask()) { //condition task need check the branch to run List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, 
completeTaskList); if (!nextTaskList.contains(nextNodeName)) { return false; } } else { ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState(); if (depTaskState.typeIsFailure()) { return false; } } return true; } /** * query task instance by complete state * * @param state state * @return task instance list */ private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) { List<TaskInstance> resultList = new ArrayList<>(); for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) { if (entry.getValue().getState() == state) { resultList.add(entry.getValue()); } } return resultList; } /** * where there are ongoing tasks * * @param state state * @return ExecutionStatus */ private ExecutionStatus runningState(ExecutionStatus state) { if (state == ExecutionStatus.READY_STOP || state == ExecutionStatus.READY_PAUSE || state == ExecutionStatus.WAITING_THREAD || state == ExecutionStatus.DELAY_EXECUTION) { // if the running task is not completed, the state remains unchanged return state; } else { return ExecutionStatus.RUNNING_EXECUTION; } } /** * exists failure task,contains submit failure、dependency failure,execute failure(retry after) * * @return Boolean whether has failed task */ private boolean hasFailedTask() { if (this.taskFailedSubmit) { return true; } if (this.errorTaskList.size() > 0) { return true; } return this.dependFailedTask.size() > 0; } /** * process instance failure * * @return Boolean whether process instance failed */ private boolean processFailed() { if (hasFailedTask()) { if (processInstance.getFailureStrategy() == FailureStrategy.END) { return true; } if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) { return readyToSubmitTaskQueue.size() == 0 || activeTaskNode.size() == 0; } } return false; } /** * whether task for waiting thread * * @return Boolean whether has waiting thread task */ private boolean hasWaitingThreadTask() { List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD); return CollectionUtils.isNotEmpty(waitingList); } /** * prepare for pause * 1,failed retry task in the preparation queue , returns to failure directly * 2,exists pause task,complement not completed, pending submission of tasks, return to suspension * 3,success * * @return ExecutionStatus */ private ExecutionStatus processReadyPause() { if (hasRetryTaskInStandBy()) { return ExecutionStatus.FAILURE; } List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE); if (CollectionUtils.isNotEmpty(pauseList) || !isComplementEnd() || readyToSubmitTaskQueue.size() > 0) { return ExecutionStatus.PAUSE; } else { return ExecutionStatus.SUCCESS; } } /** * generate the latest process instance status by the tasks state * * @return process instance execution status */ private ExecutionStatus getProcessInstanceState() { ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId()); ExecutionStatus state = instance.getState(); if (activeTaskNode.size() > 0 || hasRetryTaskInStandBy()) { // active task and retry task exists return runningState(state); } // process failure if (processFailed()) { return ExecutionStatus.FAILURE; } // waiting thread if (hasWaitingThreadTask()) { return ExecutionStatus.WAITING_THREAD; } // pause if (state == ExecutionStatus.READY_PAUSE) { return processReadyPause(); } // stop if (state == ExecutionStatus.READY_STOP) { List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP); List<TaskInstance> killList = 
getCompleteTaskByState(ExecutionStatus.KILL); if (CollectionUtils.isNotEmpty(stopList) || CollectionUtils.isNotEmpty(killList) || !isComplementEnd()) { return ExecutionStatus.STOP; } else { return ExecutionStatus.SUCCESS; } } // success if (state == ExecutionStatus.RUNNING_EXECUTION) { List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL); if (readyToSubmitTaskQueue.size() > 0) { //tasks currently pending submission, no retries, indicating that depend is waiting to complete return ExecutionStatus.RUNNING_EXECUTION; } else if (CollectionUtils.isNotEmpty(killTasks)) { // tasks maybe killed manually return ExecutionStatus.FAILURE; } else { // if the waiting queue is empty and the status is in progress, then success return ExecutionStatus.SUCCESS; } } return state; } /** * whether standby task list have retry tasks */ private boolean retryTaskExists() { boolean result = false; for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) { TaskInstance task = iter.next(); if (task.getState().typeIsFailure()) { result = true; break; } } return result; } /** * whether complement end * * @return Boolean whether is complement end */ private boolean isComplementEnd() { if (!processInstance.isComplementData()) { return true; } try { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); return processInstance.getScheduleTime().equals(endTime); } catch (Exception e) { logger.error("complement end failed ", e); return false; } } /** * updateProcessInstance process instance state * after each batch of tasks is executed, the status of the process instance is updated */ private void updateProcessInstanceState() { ExecutionStatus state = getProcessInstanceState(); if (processInstance.getState() != state) { logger.info( "work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}", processInstance.getId(), processInstance.getName(), processInstance.getState(), state, processInstance.getCommandType()); ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId()); instance.setState(state); instance.setProcessDefinition(processInstance.getProcessDefinition()); processService.updateProcessInstance(instance); processInstance = instance; } } /** * get task dependency result * * @param taskInstance task instance * @return DependResult */ private DependResult getDependResultForTask(TaskInstance taskInstance) { return isTaskDepsComplete(taskInstance.getName()); } /** * add task to standby list * * @param taskInstance task instance */ private void addTaskToStandByList(TaskInstance taskInstance) { logger.info("add task to stand by list: {}", taskInstance.getName()); try { readyToSubmitTaskQueue.put(taskInstance); } catch (Exception e) { logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e); } } /** * remove task from stand by list * * @param taskInstance task instance */ private void removeTaskFromStandbyList(TaskInstance taskInstance) { logger.info("remove task from stand by list: {}", taskInstance.getName()); try { readyToSubmitTaskQueue.remove(taskInstance); } catch (Exception e) { logger.error("remove task instance from readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e); } } /** * has retry task in standby * * @return Boolean whether has retry task in standby */ private boolean hasRetryTaskInStandBy() { for 
(Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) { if (iter.next().getState().typeIsFailure()) { return true; } } return false; } /** * submit and watch the tasks, until the work flow stop */ private void runProcess() { // submit start node submitPostNode(null); boolean sendTimeWarning = false; while (!processInstance.isProcessInstanceStop() && Stopper.isRunning()) { // send warning email if process time out. if (!sendTimeWarning && checkProcessTimeOut(processInstance)) { processAlertManager.sendProcessTimeoutAlert(processInstance, processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion())); sendTimeWarning = true; } for (Map.Entry<MasterBaseTaskExecThread, Future<Boolean>> entry : activeTaskNode.entrySet()) { Future<Boolean> future = entry.getValue(); TaskInstance task = entry.getKey().getTaskInstance(); if (!future.isDone()) { continue; } // node monitor thread complete task = this.processService.findTaskInstanceById(task.getId()); if (task == null) { this.taskFailedSubmit = true; activeTaskNode.remove(entry.getKey()); continue; } // node monitor thread complete if (task.getState().typeIsFinished()) { activeTaskNode.remove(entry.getKey()); } logger.info("task :{}, id:{} complete, state is {} ", task.getName(), task.getId(), task.getState()); // node success , post node submit if (task.getState() == ExecutionStatus.SUCCESS) { processInstance = processService.findProcessInstanceById(processInstance.getId()); processInstance.setVarPool(task.getVarPool()); processService.updateProcessInstance(processInstance); completeTaskList.put(task.getName(), task); submitPostNode(task.getName()); continue; } // node fails, retry first, and then execute the failure process if (task.getState().typeIsFailure()) { if (task.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE) { this.recoverToleranceFaultTaskList.add(task); } if (task.taskCanRetry()) { addTaskToStandByList(task); } else { completeTaskList.put(task.getName(), task); if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) { submitPostNode(task.getName()); } else { errorTaskList.put(task.getName(), task); if (processInstance.getFailureStrategy() == FailureStrategy.END) { killTheOtherTasks(); } } } continue; } // other status stop/pause completeTaskList.put(task.getName(), task); } // send alert if (CollectionUtils.isNotEmpty(this.recoverToleranceFaultTaskList)) { processAlertManager.sendAlertWorkerToleranceFault(processInstance, recoverToleranceFaultTaskList); this.recoverToleranceFaultTaskList.clear(); } // updateProcessInstance completed task status // failure priority is higher than pause // if a task fails, other suspended tasks need to be reset kill // check if there exists forced success nodes in errorTaskList if (errorTaskList.size() > 0) { for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) { TaskInstance completeTask = entry.getValue(); if (completeTask.getState() == ExecutionStatus.PAUSE) { completeTask.setState(ExecutionStatus.KILL); completeTaskList.put(entry.getKey(), completeTask); processService.updateTaskInstance(completeTask); } } for (Map.Entry<String, TaskInstance> entry : errorTaskList.entrySet()) { TaskInstance errorTask = entry.getValue(); TaskInstance currentTask = processService.findTaskInstanceById(errorTask.getId()); if (currentTask == null) { continue; } // for nodes that have been forced success if (errorTask.getState().typeIsFailure() && 
currentTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) { // update state in this thread and remove from errorTaskList errorTask.setState(currentTask.getState()); logger.info("task: {} has been forced success, remove it from error task list", errorTask.getName()); errorTaskList.remove(errorTask.getName()); // submit post nodes submitPostNode(errorTask.getName()); } } } if (canSubmitTaskToQueue()) { submitStandByTask(); } try { Thread.sleep(Constants.SLEEP_TIME_MILLIS); } catch (InterruptedException e) { logger.error(e.getMessage(), e); Thread.currentThread().interrupt(); } updateProcessInstanceState(); } logger.info("process:{} end, state :{}", processInstance.getId(), processInstance.getState()); } /** * whether check process time out * * @param processInstance task instance * @return true if time out of process instance > running time of process instance */ private boolean checkProcessTimeOut(ProcessInstance processInstance) { if (processInstance.getTimeout() == 0) { return false; } Date now = new Date(); long runningTime = DateUtils.diffMin(now, processInstance.getStartTime()); return runningTime > processInstance.getTimeout(); } /** * whether can submit task to queue * * @return boolean */ private boolean canSubmitTaskToQueue() { return OSUtils.checkResource(masterConfig.getMasterMaxCpuloadAvg(), masterConfig.getMasterReservedMemory()); } /** * close the on going tasks */ private void killTheOtherTasks() { logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(), activeTaskNode.size()); for (Map.Entry<MasterBaseTaskExecThread, Future<Boolean>> entry : activeTaskNode.entrySet()) { MasterBaseTaskExecThread taskExecThread = entry.getKey(); Future<Boolean> future = entry.getValue(); TaskInstance taskInstance = taskExecThread.getTaskInstance(); taskInstance = processService.findTaskInstanceById(taskInstance.getId()); if (taskInstance != null && taskInstance.getState().typeIsFinished()) { continue; } if (!future.isDone()) { // record kill info logger.info("kill process instance, id: {}, task: {}", processInstance.getId(), taskExecThread.getTaskInstance().getId()); // kill node taskExecThread.kill(); } } } /** * whether the retry interval is timed out * * @param taskInstance task instance * @return Boolean */ private boolean retryTaskIntervalOverTime(TaskInstance taskInstance) { if (taskInstance.getState() != ExecutionStatus.FAILURE) { return true; } if (taskInstance.getId() == 0 || taskInstance.getMaxRetryTimes() == 0 || taskInstance.getRetryInterval() == 0) { return true; } Date now = new Date(); long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime()); // task retry does not over time, return false return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < failedTimeInterval; } /** * handling the list of tasks to be submitted */ private void submitStandByTask() { try { int length = readyToSubmitTaskQueue.size(); for (int i = 0; i < length; i++) { TaskInstance task = readyToSubmitTaskQueue.peek(); // stop tasks which is retrying if forced success happens if (task.taskCanRetry()) { TaskInstance retryTask = processService.findTaskInstanceById(task.getId()); if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) { task.setState(retryTask.getState()); logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName()); removeTaskFromStandbyList(task); completeTaskList.put(task.getName(), task); submitPostNode(task.getName()); continue; } } //init varPool 
only this task is the first time running if (task.isFirstRun()) { //get pre task ,get all the task varPool to this task Set<String> preTask = dag.getPreviousNodes(task.getName()); getPreVarPool(task, preTask); } DependResult dependResult = getDependResultForTask(task); if (DependResult.SUCCESS == dependResult) { if (retryTaskIntervalOverTime(task)) { submitTaskExec(task); removeTaskFromStandbyList(task); } } else if (DependResult.FAILED == dependResult) { // if the dependency fails, the current node is not submitted and the state changes to failure. dependFailedTask.put(task.getName(), task); removeTaskFromStandbyList(task); logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult); } else if (DependResult.NON_EXEC == dependResult) { // for some reasons(depend task pause/stop) this task would not be submit removeTaskFromStandbyList(task); logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult); } } } catch (Exception e) { logger.error("submit standby task error", e); } } /** * get recovery task instance * * @param taskId task id * @return recovery task instance */ private TaskInstance getRecoveryTaskInstance(String taskId) { if (!StringUtils.isNotEmpty(taskId)) { return null; } try { Integer intId = Integer.valueOf(taskId); TaskInstance task = processService.findTaskInstanceById(intId); if (task == null) { logger.error("start node id cannot be found: {}", taskId); } else { return task; } } catch (Exception e) { logger.error("get recovery task instance failed ", e); } return null; } /** * get start task instance list * * @param cmdParam command param * @return task instance list */ private List<TaskInstance> getStartTaskInstanceList(String cmdParam) { List<TaskInstance> instanceList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) { String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA); for (String nodeId : idList) { TaskInstance task = getRecoveryTaskInstance(nodeId); if (task != null) { instanceList.add(task); } } } return instanceList; } /** * parse "StartNodeNameList" from cmd param * * @param cmdParam command param * @return start node name list */ private List<String> parseStartNodeName(String cmdParam) { List<String> startNodeNameList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap == null) { return startNodeNameList; } if (paramMap.containsKey(CMD_PARAM_START_NODE_NAMES)) { startNodeNameList = Arrays.asList(paramMap.get(CMD_PARAM_START_NODE_NAMES).split(Constants.COMMA)); } return startNodeNameList; } /** * generate start node name list from parsing command param; * if "StartNodeIdList" exists in command param, return StartNodeIdList * * @return recovery node name list */ private List<String> getRecoveryNodeNameList() { List<String> recoveryNodeNameList = new ArrayList<>(); if (CollectionUtils.isNotEmpty(recoverNodeIdList)) { for (TaskInstance task : recoverNodeIdList) { recoveryNodeNameList.add(task.getName()); } } return recoveryNodeNameList; } /** * generate flow dag * * @param totalTaskNodeList total task node list * @param startNodeNameList start node name list * @param recoveryNodeNameList recovery node name list * @param depNodeType depend node type * @return ProcessDag process dag * @throws Exception exception */ public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList, List<String> 
startNodeNameList, List<String> recoveryNodeNameList, TaskDependType depNodeType) throws Exception { return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeNameList, depNodeType); } }
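The MasterExecThread content above already reflects the fix: the iterator over schedule fire dates may legitimately be null when no release schedules exist, so executeComplementProcess must branch on that instead of dereferencing it. Below is a minimal, self-contained sketch of the guard; ComplementDateSketch and nextComplementDate are hypothetical names, and the plain day arithmetic stands in for DateUtils.getSomeDay.

```java
import java.util.Date;
import java.util.Iterator;

public class ComplementDateSketch {

    /** Next complement date, or null when the complement run is finished. */
    static Date nextComplementDate(Iterator<Date> iterator, Date current, Date endDate) {
        if (iterator == null) {
            // no release schedules: fall back to looping day by day
            Date next = new Date(current.getTime() + 24L * 60 * 60 * 1000);
            return next.after(endDate) ? null : next;
        }
        // schedules exist: follow the pre-computed fire dates
        return iterator.hasNext() ? iterator.next() : null;
    }

    public static void main(String[] args) {
        Date start = new Date(0);
        Date end = new Date(2L * 24 * 60 * 60 * 1000);
        for (Date d = start; d != null; d = nextComplementDate(null, d, end)) {
            System.out.println(d); // the null iterator exercises the day-by-day branch
        }
    }
}
```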
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,045
[Bug][Master] Dead server kills itself with an error (when the registry plugin is zookeeper)
**Describe the bug** When a master/worker session is lost from zookeeper, the ephemeral node is removed by zookeeper, and the failover node does the failover job by creating a node under the zookeeper dead-server path. The `HeartBeatTask` of the dead server then detects that it is listed under dead-server and kills itself; when it stops, it unregisters from the registry (zookeeper). https://github.com/apache/dolphinscheduler/blob/04720b327aef0649e9317573680874c20ea20ad5/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryClient.java#L371-L379 https://github.com/apache/dolphinscheduler/blob/04720b327aef0649e9317573680874c20ea20ad5/dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/src/main/java/org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperRegistry.java#L197-L204 The problem is that the dead server's ephemeral node has already been removed, so when we call `client.delete()` it throws a KeeperException.NoNodeException. **To Reproduce** 1. Start a master server 2. Delete the ephemeral node to simulate that the master is dead. 3. See the exception; the master cannot shut down, since some other threads cannot exit. **Which version of Dolphin Scheduler:** -[dev] **Requirement or improvement** Ignore the KeeperException.NoNodeException: if the node does not exist, we can consider it already deleted (a minimal sketch of the tolerant delete appears after this record).
https://github.com/apache/dolphinscheduler/issues/6045
https://github.com/apache/dolphinscheduler/pull/6046
04720b327aef0649e9317573680874c20ea20ad5
839d6054eeb32c9efbab4836aa169e1c9c6ca417
"2021-08-27T09:38:16Z"
java
"2021-08-27T11:43:04Z"
dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/src/main/java/org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperRegistry.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.registry.zookeeper; import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.BASE_SLEEP_TIME; import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.BLOCK_UNTIL_CONNECTED_WAIT_MS; import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.CONNECTION_TIMEOUT_MS; import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.DIGEST; import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.MAX_RETRIES; import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.NAME_SPACE; import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.SERVERS; import static org.apache.dolphinscheduler.plugin.registry.zookeeper.ZookeeperConfiguration.SESSION_TIMEOUT_MS; import static java.util.concurrent.TimeUnit.MILLISECONDS; import org.apache.dolphinscheduler.spi.register.DataChangeEvent; import org.apache.dolphinscheduler.spi.register.ListenerManager; import org.apache.dolphinscheduler.spi.register.Registry; import org.apache.dolphinscheduler.spi.register.RegistryConnectListener; import org.apache.dolphinscheduler.spi.register.RegistryException; import org.apache.dolphinscheduler.spi.register.SubscribeListener; import org.apache.curator.RetryPolicy; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.curator.framework.api.ACLProvider; import org.apache.curator.framework.api.transaction.TransactionOp; import org.apache.curator.framework.recipes.cache.TreeCache; import org.apache.curator.framework.recipes.cache.TreeCacheEvent; import org.apache.curator.framework.recipes.cache.TreeCacheListener; import org.apache.curator.framework.recipes.locks.InterProcessMutex; import org.apache.curator.retry.ExponentialBackoffRetry; import org.apache.curator.utils.CloseableUtils; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZooDefs; import org.apache.zookeeper.data.ACL; import java.nio.charset.StandardCharsets; import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; import com.google.common.base.Strings; public class ZookeeperRegistry implements Registry { private CuratorFramework client; /** * treeCache map * k-subscribe key * v-listener */ private Map<String, TreeCache> treeCacheMap = new HashMap<>(); /** * Distributed lock map */ private ThreadLocal<Map<String, InterProcessMutex>> threadLocalLockMap = new ThreadLocal<>(); /** * build retry policy */ private static RetryPolicy buildRetryPolicy(Map<String, String> registerData) { int baseSleepTimeMs = 
BASE_SLEEP_TIME.getParameterValue(registerData.get(BASE_SLEEP_TIME.getName())); int maxRetries = MAX_RETRIES.getParameterValue(registerData.get(MAX_RETRIES.getName())); int maxSleepMs = baseSleepTimeMs * maxRetries; return new ExponentialBackoffRetry(baseSleepTimeMs, maxRetries, maxSleepMs); } /** * build digest */ private static void buildDigest(CuratorFrameworkFactory.Builder builder, String digest) { builder.authorization(DIGEST.getName(), digest.getBytes(StandardCharsets.UTF_8)) .aclProvider(new ACLProvider() { @Override public List<ACL> getDefaultAcl() { return ZooDefs.Ids.CREATOR_ALL_ACL; } @Override public List<ACL> getAclForPath(final String path) { return ZooDefs.Ids.CREATOR_ALL_ACL; } }); } @Override public void init(Map<String, String> registerData) { CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder() .connectString(SERVERS.getParameterValue(registerData.get(SERVERS.getName()))) .retryPolicy(buildRetryPolicy(registerData)) .namespace(NAME_SPACE.getParameterValue(registerData.get(NAME_SPACE.getName()))) .sessionTimeoutMs(SESSION_TIMEOUT_MS.getParameterValue(registerData.get(SESSION_TIMEOUT_MS.getName()))) .connectionTimeoutMs(CONNECTION_TIMEOUT_MS.getParameterValue(registerData.get(CONNECTION_TIMEOUT_MS.getName()))); String digest = DIGEST.getParameterValue(registerData.get(DIGEST.getName())); if (!Strings.isNullOrEmpty(digest)) { buildDigest(builder, digest); } client = builder.build(); client.start(); try { if (!client.blockUntilConnected(BLOCK_UNTIL_CONNECTED_WAIT_MS.getParameterValue(registerData.get(BLOCK_UNTIL_CONNECTED_WAIT_MS.getName())), MILLISECONDS)) { client.close(); throw new RegistryException("zookeeper connect timeout"); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RegistryException("zookeeper connect error", e); } } @Override public void addConnectionStateListener(RegistryConnectListener registryConnectListener) { client.getConnectionStateListenable().addListener(new ZookeeperConnectionStateListener(registryConnectListener)); } @Override public boolean subscribe(String path, SubscribeListener subscribeListener) { if (null != treeCacheMap.get(path)) { return false; } TreeCache treeCache = new TreeCache(client, path); TreeCacheListener treeCacheListener = (client, event) -> { TreeCacheEvent.Type type = event.getType(); DataChangeEvent eventType = null; String dataPath = null; switch (type) { case NODE_ADDED: dataPath = event.getData().getPath(); eventType = DataChangeEvent.ADD; break; case NODE_UPDATED: eventType = DataChangeEvent.UPDATE; dataPath = event.getData().getPath(); break; case NODE_REMOVED: eventType = DataChangeEvent.REMOVE; dataPath = event.getData().getPath(); break; default: } if (null != eventType && null != dataPath) { ListenerManager.dataChange(path, dataPath, eventType); } }; treeCache.getListenable().addListener(treeCacheListener); treeCacheMap.put(path, treeCache); try { treeCache.start(); } catch (Exception e) { throw new RegistryException("start zookeeper tree cache error", e); } ListenerManager.addListener(path, subscribeListener); return true; } @Override public void unsubscribe(String path) { TreeCache treeCache = treeCacheMap.get(path); treeCache.close(); ListenerManager.removeListener(path); } @Override public String get(String key) { try { return new String(client.getData().forPath(key), StandardCharsets.UTF_8); } catch (Exception e) { throw new RegistryException("zookeeper get data error", e); } } @Override public void remove(String key) { try { 
client.delete().deletingChildrenIfNeeded().forPath(key); } catch (Exception e) { throw new RegistryException("zookeeper remove error", e); } } @Override public boolean isExisted(String key) { try { return null != client.checkExists().forPath(key); } catch (Exception e) { throw new RegistryException("zookeeper check key is existed error", e); } } @Override public void persist(String key, String value) { try { if (isExisted(key)) { update(key, value); return; } client.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath(key, value.getBytes(StandardCharsets.UTF_8)); } catch (Exception e) { throw new RegistryException("zookeeper persist error", e); } } @Override public void persistEphemeral(String key, String value) { try { if (isExisted(key)) { update(key, value); return; } client.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(key, value.getBytes(StandardCharsets.UTF_8)); } catch (Exception e) { throw new RegistryException("zookeeper persist ephemeral error", e); } } @Override public void update(String key, String value) { try { if (!isExisted(key)) { return; } TransactionOp transactionOp = client.transactionOp(); client.transaction().forOperations(transactionOp.check().forPath(key), transactionOp.setData().forPath(key, value.getBytes(StandardCharsets.UTF_8))); } catch (Exception e) { throw new RegistryException("zookeeper update error", e); } } @Override public List<String> getChildren(String key) { try { List<String> result = client.getChildren().forPath(key); result.sort(Comparator.reverseOrder()); return result; } catch (Exception e) { throw new RegistryException("zookeeper get children error", e); } } @Override public boolean delete(String nodePath) { try { client.delete() .deletingChildrenIfNeeded() .forPath(nodePath); } catch (Exception e) { throw new RegistryException("zookeeper delete key error", e); } return true; } @Override public boolean acquireLock(String key) { InterProcessMutex interProcessMutex = new InterProcessMutex(client, key); try { interProcessMutex.acquire(); if (null == threadLocalLockMap.get()) { threadLocalLockMap.set(new HashMap<>(3)); } threadLocalLockMap.get().put(key, interProcessMutex); return true; } catch (Exception e) { try { interProcessMutex.release(); throw new RegistryException("zookeeper get lock error", e); } catch (Exception exception) { throw new RegistryException("zookeeper release lock error", e); } } } @Override public boolean releaseLock(String key) { if (null == threadLocalLockMap.get().get(key)) { return false; } try { threadLocalLockMap.get().get(key).release(); threadLocalLockMap.get().remove(key); if (threadLocalLockMap.get().isEmpty()) { threadLocalLockMap.remove(); } } catch (Exception e) { throw new RegistryException("zookeeper release lock error", e); } return true; } public CuratorFramework getClient() { return client; } @Override public void close() { treeCacheMap.forEach((key, value) -> value.close()); waitForCacheClose(500); CloseableUtils.closeQuietly(client); } private void waitForCacheClose(long millis) { try { Thread.sleep(millis); } catch (final InterruptedException ex) { Thread.currentThread().interrupt(); } } }
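The remove() in the ZookeeperRegistry content above throws on any exception, which is exactly what the issue targets. Below is a minimal sketch of the proposed tolerance, assuming Curator rethrows the raw KeeperException from forPath: treat NoNodeException as success, since the node being gone is the desired end state. TolerantDelete is a hypothetical class name, and RuntimeException stands in for the project's RegistryException.

```java
import org.apache.curator.framework.CuratorFramework;
import org.apache.zookeeper.KeeperException;

public class TolerantDelete {

    static void remove(CuratorFramework client, String key) {
        try {
            client.delete().deletingChildrenIfNeeded().forPath(key);
        } catch (KeeperException.NoNodeException ignored) {
            // zookeeper already expired the ephemeral node; the key is gone,
            // which is exactly the state remove() is meant to reach
        } catch (Exception e) {
            // RuntimeException stands in for RegistryException in this sketch
            throw new RuntimeException("zookeeper remove error", e);
        }
    }
}
```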
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,045
[Bug][Master] Dead server kills itself with an error (when the registry plugin is zookeeper)
**Describe the bug** When a master/worker session is lost from zookeeper, its ephemeral node is removed by zookeeper. The failover node then performs the failover job and creates a node under the zookeeper dead-server path. The `HeartBeatTask` of the dead server detects that it has been listed under dead-server and kills itself. When it stops, it unregisters from the registry (zookeeper). https://github.com/apache/dolphinscheduler/blob/04720b327aef0649e9317573680874c20ea20ad5/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryClient.java#L371-L379 https://github.com/apache/dolphinscheduler/blob/04720b327aef0649e9317573680874c20ea20ad5/dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/src/main/java/org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperRegistry.java#L197-L204 The problem is that the dead server's ephemeral node has already been removed, so when we call `client.delete()` it throws a KeeperException.NoNodeException. **To Reproduce** 1. Start a master server 2. Delete the ephemeral node to simulate that the master is dead. 3. Observe the exception; the master cannot shut down, since some other threads cannot exit. **Which version of Dolphin Scheduler:** -[dev] **Requirement or improvement** Ignore the KeeperException.NoNodeException: if the node does not exist, we can consider it already deleted.
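A minimal sketch of the suggested fix, assuming the Curator `client` field and the `RegistryException` already present in ZookeeperRegistry (an illustration of the idea, not necessarily the exact merged patch):

```java
// requires: import org.apache.zookeeper.KeeperException;
@Override
public boolean delete(String nodePath) {
    try {
        client.delete()
                .deletingChildrenIfNeeded()
                .forPath(nodePath);
    } catch (KeeperException.NoNodeException ignored) {
        // The ephemeral node was already removed by zookeeper when the
        // session was lost, so there is nothing left to delete: treat the
        // node as successfully deleted instead of propagating the error.
    } catch (Exception e) {
        throw new RegistryException("zookeeper delete key error", e);
    }
    return true;
}
```

The same guard applies to `remove(String key)`, which issues the identical `client.delete().deletingChildrenIfNeeded()` call.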
https://github.com/apache/dolphinscheduler/issues/6045
https://github.com/apache/dolphinscheduler/pull/6046
04720b327aef0649e9317573680874c20ea20ad5
839d6054eeb32c9efbab4836aa169e1c9c6ca417
"2021-08-27T09:38:16Z"
java
"2021-08-27T11:43:04Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/WorkerServer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.IStoppable; import org.apache.dolphinscheduler.common.enums.NodeType; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.remote.NettyRemotingServer; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.config.NettyServerConfig; import org.apache.dolphinscheduler.server.worker.config.WorkerConfig; import org.apache.dolphinscheduler.server.worker.processor.DBTaskAckProcessor; import org.apache.dolphinscheduler.server.worker.processor.DBTaskResponseProcessor; import org.apache.dolphinscheduler.server.worker.processor.TaskExecuteProcessor; import org.apache.dolphinscheduler.server.worker.processor.TaskKillProcessor; import org.apache.dolphinscheduler.server.worker.registry.WorkerRegistryClient; import org.apache.dolphinscheduler.server.worker.runner.RetryReportTaskStatusThread; import org.apache.dolphinscheduler.server.worker.runner.WorkerManagerThread; import org.apache.dolphinscheduler.service.alert.AlertClientService; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import java.util.Set; import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.WebApplicationType; import org.springframework.boot.builder.SpringApplicationBuilder; import org.springframework.context.annotation.ComponentScan; import org.springframework.context.annotation.FilterType; import org.springframework.transaction.annotation.EnableTransactionManagement; /** * worker server */ @ComponentScan(value = "org.apache.dolphinscheduler", excludeFilters = { @ComponentScan.Filter(type = FilterType.REGEX, pattern = { "org.apache.dolphinscheduler.server.master.*", "org.apache.dolphinscheduler.server.monitor.*", "org.apache.dolphinscheduler.server.log.*" }) }) @EnableTransactionManagement public class WorkerServer implements IStoppable { /** * logger */ private static final Logger logger = LoggerFactory.getLogger(WorkerServer.class); /** * netty remote server */ private NettyRemotingServer nettyRemotingServer; /** * worker registry */ @Autowired private WorkerRegistryClient workerRegistryClient; /** * worker config */ @Autowired private WorkerConfig workerConfig; /** * spring application context * only use it for initialization */ @Autowired private SpringApplicationContext springApplicationContext; /** * alert model netty remote server */ private AlertClientService alertClientService; @Autowired private RetryReportTaskStatusThread retryReportTaskStatusThread; @Autowired private 
WorkerManagerThread workerManagerThread; /** * worker server startup, not use web service * * @param args arguments */ public static void main(String[] args) { Thread.currentThread().setName(Constants.THREAD_NAME_WORKER_SERVER); new SpringApplicationBuilder(WorkerServer.class).web(WebApplicationType.NONE).run(args); } /** * worker server run */ @PostConstruct public void run() { // alert-server client registry alertClientService = new AlertClientService(workerConfig.getAlertListenHost(), Constants.ALERT_RPC_PORT); // init remoting server NettyServerConfig serverConfig = new NettyServerConfig(); serverConfig.setListenPort(workerConfig.getListenPort()); this.nettyRemotingServer = new NettyRemotingServer(serverConfig); this.nettyRemotingServer.registerProcessor(CommandType.TASK_EXECUTE_REQUEST, new TaskExecuteProcessor(alertClientService)); this.nettyRemotingServer.registerProcessor(CommandType.TASK_KILL_REQUEST, new TaskKillProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.DB_TASK_ACK, new DBTaskAckProcessor()); this.nettyRemotingServer.registerProcessor(CommandType.DB_TASK_RESPONSE, new DBTaskResponseProcessor()); this.nettyRemotingServer.start(); // worker registry try { this.workerRegistryClient.registry(); this.workerRegistryClient.setRegistryStoppable(this); Set<String> workerZkPaths = this.workerRegistryClient.getWorkerZkPaths(); this.workerRegistryClient.handleDeadServer(workerZkPaths, NodeType.WORKER, Constants.DELETE_OP); } catch (Exception e) { logger.error(e.getMessage(), e); throw new RuntimeException(e); } // task execute manager this.workerManagerThread.start(); // retry report task status this.retryReportTaskStatusThread.start(); /** * registry hooks, which are called before the process exits */ Runtime.getRuntime().addShutdownHook(new Thread(() -> { if (Stopper.isRunning()) { close("shutdownHook"); } })); } public void close(String cause) { try { // execute only once if (Stopper.isStopped()) { return; } logger.info("worker server is stopping ..., cause : {}", cause); // set stop signal is true Stopper.stop(); try { // thread sleep 3 seconds for thread quitely stop Thread.sleep(3000L); } catch (Exception e) { logger.warn("thread sleep exception", e); } // close this.nettyRemotingServer.close(); this.workerRegistryClient.unRegistry(); this.alertClientService.close(); } catch (Exception e) { logger.error("worker server stop exception ", e); } } @Override public void stop(String cause) { close(cause); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,045
[Bug][Master] Dead server kills itself with an error (when the registry plugin is zookeeper)
**Describe the bug** When a master/worker session is lost from zookeeper, its ephemeral node is removed by zookeeper. The failover node then performs the failover job and creates a node under the zookeeper dead-server path. The `HeartBeatTask` of the dead server detects that it has been listed under dead-server and kills itself. When it stops, it unregisters from the registry (zookeeper). https://github.com/apache/dolphinscheduler/blob/04720b327aef0649e9317573680874c20ea20ad5/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryClient.java#L371-L379 https://github.com/apache/dolphinscheduler/blob/04720b327aef0649e9317573680874c20ea20ad5/dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/src/main/java/org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperRegistry.java#L197-L204 The problem is that the dead server's ephemeral node has already been removed, so when we call `client.delete()` it throws a KeeperException.NoNodeException. **To Reproduce** 1. Start a master server 2. Delete the ephemeral node to simulate that the master is dead. 3. Observe the exception; the master cannot shut down, since some other threads cannot exit. **Which version of Dolphin Scheduler:** -[dev] **Requirement or improvement** Ignore the KeeperException.NoNodeException: if the node does not exist, we can consider it already deleted.
https://github.com/apache/dolphinscheduler/issues/6045
https://github.com/apache/dolphinscheduler/pull/6046
04720b327aef0649e9317573680874c20ea20ad5
839d6054eeb32c9efbab4836aa169e1c9c6ca417
"2021-08-27T09:38:16Z"
java
"2021-08-27T11:43:04Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/RetryReportTaskStatusThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.runner; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.server.worker.cache.ResponceCache; import org.apache.dolphinscheduler.server.worker.processor.TaskCallbackService; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.stereotype.Component; import java.util.Map; /** * Retry Report Task Status Thread */ @Component public class RetryReportTaskStatusThread implements Runnable { private final Logger logger = LoggerFactory.getLogger(RetryReportTaskStatusThread.class); /** * every 5 minutes */ private static long RETRY_REPORT_TASK_STATUS_INTERVAL = 5 * 60 * 1000L; /** * task callback service */ private final TaskCallbackService taskCallbackService; public void start(){ Thread thread = new Thread(this,"RetryReportTaskStatusThread"); thread.start(); } public RetryReportTaskStatusThread(){ this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class); } /** * retry ack/response */ @Override public void run() { ResponceCache responceCache = ResponceCache.get(); while (Stopper.isRunning()){ // sleep 5 minutes ThreadUtils.sleep(RETRY_REPORT_TASK_STATUS_INTERVAL); try { if (!responceCache.getAckCache().isEmpty()){ Map<Integer,Command> ackCache = responceCache.getAckCache(); for (Map.Entry<Integer, Command> entry : ackCache.entrySet()){ Integer taskInstanceId = entry.getKey(); Command ackCommand = entry.getValue(); taskCallbackService.sendAck(taskInstanceId,ackCommand); } } if (!responceCache.getResponseCache().isEmpty()){ Map<Integer,Command> responseCache = responceCache.getResponseCache(); for (Map.Entry<Integer, Command> entry : responseCache.entrySet()){ Integer taskInstanceId = entry.getKey(); Command responseCommand = entry.getValue(); taskCallbackService.sendResult(taskInstanceId,responseCommand); } } }catch (Exception e){ logger.warn("retry report task status error", e); } } } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,045
[Bug][Master] Dead server kills itself with an error (when the registry plugin is zookeeper)
**Describe the bug** When a master/worker session is lost from zookeeper, its ephemeral node is removed by zookeeper. The failover node then performs the failover job and creates a node under the zookeeper dead-server path. The `HeartBeatTask` of the dead server detects that it has been listed under dead-server and kills itself. When it stops, it unregisters from the registry (zookeeper). https://github.com/apache/dolphinscheduler/blob/04720b327aef0649e9317573680874c20ea20ad5/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/MasterRegistryClient.java#L371-L379 https://github.com/apache/dolphinscheduler/blob/04720b327aef0649e9317573680874c20ea20ad5/dolphinscheduler-registry-plugin/dolphinscheduler-registry-zookeeper/src/main/java/org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperRegistry.java#L197-L204 The problem is that the dead server's ephemeral node has already been removed, so when we call `client.delete()` it throws a KeeperException.NoNodeException. **To Reproduce** 1. Start a master server 2. Delete the ephemeral node to simulate that the master is dead. 3. Observe the exception; the master cannot shut down, since some other threads cannot exit. **Which version of Dolphin Scheduler:** -[dev] **Requirement or improvement** Ignore the KeeperException.NoNodeException: if the node does not exist, we can consider it already deleted.
https://github.com/apache/dolphinscheduler/issues/6045
https://github.com/apache/dolphinscheduler/pull/6046
04720b327aef0649e9317573680874c20ea20ad5
839d6054eeb32c9efbab4836aa169e1c9c6ca417
"2021-08-27T09:38:16Z"
java
"2021-08-27T11:43:04Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/WorkerManagerThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.runner; import org.apache.dolphinscheduler.common.enums.Event; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.remote.command.TaskExecuteResponseCommand; import org.apache.dolphinscheduler.server.entity.TaskExecutionContext; import org.apache.dolphinscheduler.server.worker.cache.ResponceCache; import org.apache.dolphinscheduler.server.worker.cache.TaskExecutionContextCacheManager; import org.apache.dolphinscheduler.server.worker.cache.impl.TaskExecutionContextCacheManagerImpl; import org.apache.dolphinscheduler.server.worker.config.WorkerConfig; import org.apache.dolphinscheduler.server.worker.processor.TaskCallbackService; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import java.util.concurrent.DelayQueue; import java.util.concurrent.ExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.stereotype.Component; /** * Manage tasks */ @Component public class WorkerManagerThread implements Runnable { private final Logger logger = LoggerFactory.getLogger(WorkerManagerThread.class); /** * task queue */ private final DelayQueue<TaskExecuteThread> workerExecuteQueue = new DelayQueue<>(); /** * worker config */ private final WorkerConfig workerConfig; /** * thread executor service */ private final ExecutorService workerExecService; /** * taskExecutionContextCacheManager */ private TaskExecutionContextCacheManager taskExecutionContextCacheManager; /** * task callback service */ private final TaskCallbackService taskCallbackService; public WorkerManagerThread() { this.workerConfig = SpringApplicationContext.getBean(WorkerConfig.class); this.taskExecutionContextCacheManager = SpringApplicationContext.getBean(TaskExecutionContextCacheManagerImpl.class); this.workerExecService = ThreadUtils.newDaemonFixedThreadExecutor("Worker-Execute-Thread", this.workerConfig.getWorkerExecThreads()); this.taskCallbackService = SpringApplicationContext.getBean(TaskCallbackService.class); } /** * get queue size * * @return queue size */ public int getQueueSize() { return workerExecuteQueue.size(); } /** * Kill tasks that have not been executed, like delay task * then send Response to Master, update the execution status of task instance */ public void killTaskBeforeExecuteByInstanceId(Integer taskInstanceId) { workerExecuteQueue.stream() .filter(taskExecuteThread -> taskExecuteThread.getTaskExecutionContext().getTaskInstanceId() == taskInstanceId) .forEach(workerExecuteQueue::remove); sendTaskKillResponse(taskInstanceId); } /** * kill task before execute , like 
delay task */ private void sendTaskKillResponse(Integer taskInstanceId) { TaskExecutionContext taskExecutionContext = taskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId); if (taskExecutionContext == null) { return; } TaskExecuteResponseCommand responseCommand = new TaskExecuteResponseCommand(taskExecutionContext.getTaskInstanceId()); responseCommand.setStatus(ExecutionStatus.KILL.getCode()); ResponceCache.get().cache(taskExecutionContext.getTaskInstanceId(), responseCommand.convert2Command(), Event.RESULT); taskCallbackService.sendResult(taskExecutionContext.getTaskInstanceId(), responseCommand.convert2Command()); } /** * submit task * * @param taskExecuteThread taskExecuteThread * @return submit result */ public boolean offer(TaskExecuteThread taskExecuteThread) { return workerExecuteQueue.offer(taskExecuteThread); } public void start() { Thread thread = new Thread(this, this.getClass().getName()); thread.start(); } @Override public void run() { Thread.currentThread().setName("Worker-Execute-Manager-Thread"); TaskExecuteThread taskExecuteThread; while (Stopper.isRunning()) { try { taskExecuteThread = workerExecuteQueue.take(); workerExecService.submit(taskExecuteThread); } catch (Exception e) { logger.error("An unexpected interrupt is happened, " + "the exception will be ignored and this thread will continue to run", e); } } } }
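As an aside on the WorkerManagerThread above: it parks tasks in a `java.util.concurrent.DelayQueue`, whose `take()` only returns an element once its delay has expired. A self-contained illustration of that pattern (demo class names are ours, not DolphinScheduler classes):

```java
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

// Illustration of the DelayQueue pattern used by WorkerManagerThread:
// elements become take()-able only after their delay expires, which is how
// delayed tasks wait in workerExecuteQueue before being submitted.
public class DelayQueueDemo {
    static class DelayedTask implements Delayed {
        private final long readyAt;
        private final String name;
        DelayedTask(String name, long delayMs) {
            this.name = name;
            this.readyAt = System.currentTimeMillis() + delayMs;
        }
        @Override
        public long getDelay(TimeUnit unit) {
            return unit.convert(readyAt - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
        }
        @Override
        public int compareTo(Delayed other) {
            return Long.compare(getDelay(TimeUnit.MILLISECONDS), other.getDelay(TimeUnit.MILLISECONDS));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        DelayQueue<DelayedTask> queue = new DelayQueue<>();
        queue.offer(new DelayedTask("task-1", 500));
        // take() blocks for roughly 500 ms, mirroring how the worker waits
        // before submitting a delayed task to the execute thread pool.
        System.out.println("got " + queue.take().name);
    }
}
```

This is also why `killTaskBeforeExecuteByInstanceId` can simply remove a not-yet-started delay task from the queue: until its delay expires, the element is still sitting in `workerExecuteQueue`.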
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,053
[Bug][UI] Lost zh_CN.js
The file zh_CN.js (dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js) is lost. zh_CN.js needs to be reverted.
https://github.com/apache/dolphinscheduler/issues/6053
https://github.com/apache/dolphinscheduler/pull/6054
037332033058ff42e997ac46a2d078bfcb1da34e
e0eea995200f673d6406ec62c464c77f1d5b6171
"2021-08-28T12:54:52Z"
java
"2021-08-28T16:05:08Z"
dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,038
[Bug][UI] Width of "SQL Statement" in DAG FormLineModal shrinks if a SQL line is too long
**Describe the bug** The width of "SQL Statement" in the DAG FormLineModal shrinks if a SQL line is too long. **To Reproduce** Steps to reproduce the behavior, for example: 1. Go to the create-definition page 2. Add a SQL node 3. Enter 'ALTER TABLE ods.ods_tz_h5ubt_log_realtime add if not exists partition(dt='${dt_1}')' in SQL Statement 4. See the error **Expected behavior** The width of "SQL Statement" in the DAG FormLineModal should not shrink when a SQL line is too long. **Screenshots** ![Screen Shot 2021-08-26 at 10 32 54 AM](https://user-images.githubusercontent.com/23090803/130896689-f04ae670-fc50-4d26-9d14-95b305e01f4c.png) **Which version of Dolphin Scheduler:** -[1.3.8-release]
https://github.com/apache/dolphinscheduler/issues/6038
https://github.com/apache/dolphinscheduler/pull/6040
e0eea995200f673d6406ec62c464c77f1d5b6171
e866d1be86464d812551e8c38ba60767a204c82e
"2021-08-26T03:02:30Z"
java
"2021-08-31T08:27:45Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.scss
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ .form-model-wrapper { width: 720px; height: 100vh; position: relative; .title-box { height: 61px; border-bottom: 1px solid #DCDEDC; position: relative; .name { position: absolute; left: 24px; top: 18px; font-size: 16px; } .go-subtask { position: absolute; right: 30px; top: 17px; a { font-size: 14px; color: #0097e0; margin-left: 10px; i.ansicon { font-size: 18px; vertical-align: middle; } em { color: #333; vertical-align: middle; font-style: normal; vertical-align: middle; padding-left: 2px; } &:hover { em { text-decoration: underline; } } } } } .bottom-box { position: absolute; bottom: 0; left: 0; width: 100%; text-align: right; height: 60px; line-height: 60px; border-top: 1px solid #DCDEDC; background: #fff; .submit { padding-right: 20px; position: relative; z-index: 9; } } .content-box { overflow-y: scroll; height: calc(100vh - 121px); } } .form-model { padding-top: 26px; padding-bottom: 10px; >div { clear: both; } .list { display: flex; margin-right: 25px; margin-bottom: 10px; .text-box { width: 130px; text-align: right; margin-right: 8px; >span { font-size: 14px; color: #777; display: inline-block; padding-top: 6px; } } .cont-box { flex: 1; .label-box { width: 100%; } .text-b { font-size: 14px; color: #777; display: inline-block; padding:0 6px 0 20px; } .cont-extra { margin-left: 15px; flex: 1; } .el-radio-group { vertical-align: sub; padding-top: 6px; margin-top: 3px; } .user-def-params-model { padding-top: 3px; } } .cont-box + .text-box { margin-left: 30px; } .display-flex { display: flex; } .flex-1 { flex: 1; } .add { line-height: 32px; a { color: #0097e0; } } .list-t { width: 50%; float: left; } } .requiredIcon { color: #ff0000; padding-right: 4px; } .ans-modal-box-max { position: absolute; right: -12px; top: -16px; } .vue-treeselect--disabled { .vue-treeselect__control { background-color: #ecf3f8; .vue-treeselect__single-value { color: #6d859e; } } } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,135
[Bug][API] The interface of 'authed project' is missing.
branch: [dev] The "authed-project" interface in the API module is missing. ![image](https://user-images.githubusercontent.com/29528966/132490379-e40c3df0-74c0-4bf8-8f5b-adfbbff96666.png)
https://github.com/apache/dolphinscheduler/issues/6135
https://github.com/apache/dolphinscheduler/pull/6136
9d7f70bca777177c14efb3d32e469a54951f4a6b
edb8f3c435a052bdd622b5a9a7c4ce995f5705b5
"2021-09-08T10:09:49Z"
java
"2021-09-08T12:47:54Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.controller; import static org.apache.dolphinscheduler.api.enums.Status.AUTHORIZED_DATA_SOURCE; import static org.apache.dolphinscheduler.api.enums.Status.CONNECTION_TEST_FAILURE; import static org.apache.dolphinscheduler.api.enums.Status.CONNECT_DATASOURCE_FAILURE; import static org.apache.dolphinscheduler.api.enums.Status.CREATE_DATASOURCE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.DELETE_DATA_SOURCE_FAILURE; import static org.apache.dolphinscheduler.api.enums.Status.KERBEROS_STARTUP_STATE; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_DATASOURCE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.UNAUTHORIZED_DATASOURCE; import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_DATASOURCE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.VERIFY_DATASOURCE_NAME_FAILURE; import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ApiException; import org.apache.dolphinscheduler.api.service.DataSourceService; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.datasource.BaseDataSourceParamDTO; import org.apache.dolphinscheduler.common.datasource.ConnectionParam; import org.apache.dolphinscheduler.common.datasource.DatasourceUtil; import org.apache.dolphinscheduler.common.enums.DbType; import org.apache.dolphinscheduler.common.utils.CommonUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.dao.entity.User; import java.util.Map; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.PutMapping; import org.springframework.web.bind.annotation.RequestAttribute; import org.springframework.web.bind.annotation.RequestBody; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; import io.swagger.annotations.Api; import io.swagger.annotations.ApiImplicitParam; import io.swagger.annotations.ApiImplicitParams; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; import 
springfox.documentation.annotations.ApiIgnore; /** * data source controller */ @Api(tags = "DATA_SOURCE_TAG") @RestController @RequestMapping("datasources") public class DataSourceController extends BaseController { @Autowired private DataSourceService dataSourceService; /** * create data source * * @param loginUser login user * @param dataSourceParam datasource param * @return create result code */ @ApiOperation(value = "createDataSource", notes = "CREATE_DATA_SOURCE_NOTES") @PostMapping() @ResponseStatus(HttpStatus.CREATED) @ApiException(CREATE_DATASOURCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result createDataSource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "dataSourceParam", value = "DATA_SOURCE_PARAM", required = true) @RequestBody BaseDataSourceParamDTO dataSourceParam) { return dataSourceService.createDataSource(loginUser, dataSourceParam); } /** * updateProcessInstance data source * * @param loginUser login user * @param id datasource id * @param dataSourceParam datasource param * @return update result code */ @ApiOperation(value = "updateDataSource", notes = "UPDATE_DATA_SOURCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "id", value = "DATA_SOURCE_ID", required = true, dataType = "Integer"), @ApiImplicitParam(name = "dataSourceParam", value = "DATA_SOURCE_PARAM", required = true, dataType = "BaseDataSourceParamDTO") }) @PutMapping(value = "/{id}") @ResponseStatus(HttpStatus.OK) @ApiException(UPDATE_DATASOURCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result updateDataSource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @PathVariable(value = "id") Integer id, @RequestBody BaseDataSourceParamDTO dataSourceParam) { dataSourceParam.setId(id); return dataSourceService.updateDataSource(dataSourceParam.getId(), loginUser, dataSourceParam); } /** * query data source detail * * @param loginUser login user * @param id datasource id * @return data source detail */ @ApiOperation(value = "queryDataSource", notes = "QUERY_DATA_SOURCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "id", value = "DATA_SOURCE_ID", required = true, dataType = "Int", example = "100") }) @GetMapping(value = "/{id}") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_DATASOURCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryDataSource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @PathVariable("id") int id) { Map<String, Object> result = dataSourceService.queryDataSource(id); return returnDataList(result); } /** * query datasource by type * * @param loginUser login user * @param type data source type * @return data source list page */ @ApiOperation(value = "queryDataSourceList", notes = "QUERY_DATA_SOURCE_LIST_BY_TYPE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "type", value = "DB_TYPE", required = true, dataType = "DbType") }) @GetMapping(value = "/list") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_DATASOURCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryDataSourceList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("type") DbType type) { Map<String, Object> result = dataSourceService.queryDataSourceList(loginUser, type.ordinal()); return returnDataList(result); } /** * query datasource with paging * * @param loginUser login user * @param searchVal search value * @param pageNo page number * @param pageSize 
page size * @return data source list page */ @ApiOperation(value = "queryDataSourceListPaging", notes = "QUERY_DATA_SOURCE_LIST_PAGING_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"), @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", required = true, dataType = "Int", example = "1"), @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", required = true, dataType = "Int", example = "20") }) @GetMapping() @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_DATASOURCE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryDataSourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "searchVal", required = false) String searchVal, @RequestParam("pageNo") Integer pageNo, @RequestParam("pageSize") Integer pageSize) { Result result = checkPageParams(pageNo, pageSize); if (!result.checkResult()) { return result; } searchVal = ParameterUtils.handleEscapes(searchVal); return dataSourceService.queryDataSourceListPaging(loginUser, searchVal, pageNo, pageSize); } /** * connect datasource * * @param loginUser login user * @param dataSourceParam datasource param * @return connect result code */ @ApiOperation(value = "connectDataSource", notes = "CONNECT_DATA_SOURCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "dataSourceParam", value = "DATA_SOURCE_PARAM", required = true, dataType = "BaseDataSourceParamDTO") }) @PostMapping(value = "/connect") @ResponseStatus(HttpStatus.OK) @ApiException(CONNECT_DATASOURCE_FAILURE) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result connectDataSource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestBody BaseDataSourceParamDTO dataSourceParam) { DatasourceUtil.checkDatasourceParam(dataSourceParam); ConnectionParam connectionParams = DatasourceUtil.buildConnectionParams(dataSourceParam); return dataSourceService.checkConnection(dataSourceParam.getType(), connectionParams); } /** * connection test * * @param loginUser login user * @param id data source id * @return connect result code */ @ApiOperation(value = "connectionTest", notes = "CONNECT_DATA_SOURCE_TEST_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "id", value = "DATA_SOURCE_ID", required = true, dataType = "Int", example = "100") }) @GetMapping(value = "/{id}/connect-test") @ResponseStatus(HttpStatus.OK) @ApiException(CONNECTION_TEST_FAILURE) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result connectionTest(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @PathVariable("id") int id) { return dataSourceService.connectionTest(id); } /** * delete datasource by id * * @param loginUser login user * @param id datasource id * @return delete result */ @ApiOperation(value = "deleteDataSource", notes = "DELETE_DATA_SOURCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "id", value = "DATA_SOURCE_ID", required = true, dataType = "Int", example = "100") }) @DeleteMapping(value = "/{id}") @ResponseStatus(HttpStatus.OK) @ApiException(DELETE_DATA_SOURCE_FAILURE) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result delete(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @PathVariable("id") int id) { return dataSourceService.delete(loginUser, id); } /** * verify datasource name * * @param loginUser login user * @param name data source name * @return true if data source name not exists.otherwise return false */ 
@ApiOperation(value = "verifyDataSourceName", notes = "VERIFY_DATA_SOURCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "name", value = "DATA_SOURCE_NAME", required = true, dataType = "String") }) @GetMapping(value = "/verify-name") @ResponseStatus(HttpStatus.OK) @ApiException(VERIFY_DATASOURCE_NAME_FAILURE) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result verifyDataSourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "name") String name ) { return dataSourceService.verifyDataSourceName(name); } /** * unauthorized datasource * * @param loginUser login user * @param userId user id * @return unauthed data source result code */ @ApiOperation(value = "unauthDatasource", notes = "UNAUTHORIZED_DATA_SOURCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100") }) @GetMapping(value = "/unauth") @ResponseStatus(HttpStatus.OK) @ApiException(UNAUTHORIZED_DATASOURCE) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result unauthDatasource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("userId") Integer userId) { Map<String, Object> result = dataSourceService.unauthDatasource(loginUser, userId); return returnDataList(result); } /** * authorized datasource * * @param loginUser login user * @param userId user id * @return authorized result code */ @ApiOperation(value = "authedDatasource", notes = "AUTHORIZED_DATA_SOURCE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType = "Int", example = "100") }) @GetMapping(value = "/authed") @ResponseStatus(HttpStatus.OK) @ApiException(AUTHORIZED_DATA_SOURCE) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result authedDatasource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("userId") Integer userId) { Map<String, Object> result = dataSourceService.authedDatasource(loginUser, userId); return returnDataList(result); } /** * get user info * * @param loginUser login user * @return user info data */ @ApiOperation(value = "getKerberosStartupState", notes = "GET_USER_INFO_NOTES") @GetMapping(value = "/kerberos-startup-state") @ResponseStatus(HttpStatus.OK) @ApiException(KERBEROS_STARTUP_STATE) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result getKerberosStartupState(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) { // if upload resource is HDFS and kerberos startup is true , else false return success(Status.SUCCESS.getMsg(), CommonUtils.getKerberosStartupState()); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,135
[Bug][API] The interface of 'authed project' is missing.
branch: [dev] The "authed-project" interface in the API module is missing. ![image](https://user-images.githubusercontent.com/29528966/132490379-e40c3df0-74c0-4bf8-8f5b-adfbbff96666.png)
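For reference, the 'authed project' endpoint as it appears in the fixed ProjectController below (a condensed excerpt of that file content, not new API design):

```java
@ApiOperation(value = "queryAuthorizedProject", notes = "QUERY_AUTHORIZED_PROJECT_NOTES")
@GetMapping(value = "/authed")
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_AUTHORIZED_PROJECT)
@AccessLogAnnotation(ignoreRequestArgs = "loginUser")
public Result queryAuthorizedProject(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                     @RequestParam("userId") Integer userId) {
    // projects the given user has been granted permission to see,
    // excluding projects created by that user
    Map<String, Object> result = projectService.queryAuthorizedProject(loginUser, userId);
    return returnDataList(result);
}
```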
https://github.com/apache/dolphinscheduler/issues/6135
https://github.com/apache/dolphinscheduler/pull/6136
9d7f70bca777177c14efb3d32e469a54951f4a6b
edb8f3c435a052bdd622b5a9a7c4ce995f5705b5
"2021-09-08T10:09:49Z"
java
"2021-09-08T12:47:54Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProjectController.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.controller; import static org.apache.dolphinscheduler.api.enums.Status.CREATE_PROJECT_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.DELETE_PROJECT_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_AUTHORIZED_PROJECT; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_PROJECT_DETAILS_BY_CODE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_UNAUTHORIZED_PROJECT_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_PROJECT_ERROR; import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation; import org.apache.dolphinscheduler.api.exceptions.ApiException; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.dao.entity.User; import java.util.Map; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.web.bind.annotation.DeleteMapping; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.PutMapping; import org.springframework.web.bind.annotation.RequestAttribute; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; import io.swagger.annotations.Api; import io.swagger.annotations.ApiImplicitParam; import io.swagger.annotations.ApiImplicitParams; import io.swagger.annotations.ApiOperation; import springfox.documentation.annotations.ApiIgnore; /** * project controller */ @Api(tags = "PROJECT_TAG") @RestController @RequestMapping("projects") public class ProjectController extends BaseController { @Autowired private ProjectService projectService; /** * create project * * @param loginUser login user * @param projectName project name * @param description description * @return returns an error if it exists */ @ApiOperation(value = "create", notes = "CREATE_PROJECT_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "projectName", value = "PROJECT_NAME", dataType = "String"), @ApiImplicitParam(name = "description", value = "PROJECT_DESC", 
dataType = "String") }) @PostMapping() @ResponseStatus(HttpStatus.CREATED) @ApiException(CREATE_PROJECT_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result createProject(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("projectName") String projectName, @RequestParam(value = "description", required = false) String description) { Map<String, Object> result = projectService.createProject(loginUser, projectName, description); return returnDataList(result); } /** * update project * * @param loginUser login user * @param code project code * @param projectName project name * @param description description * @return update result code */ @ApiOperation(value = "update", notes = "UPDATE_PROJECT_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "code", value = "PROJECT_CODE", dataType = "Long", example = "123456"), @ApiImplicitParam(name = "projectName", value = "PROJECT_NAME", dataType = "String"), @ApiImplicitParam(name = "description", value = "PROJECT_DESC", dataType = "String"), @ApiImplicitParam(name = "userName", value = "USER_NAME", dataType = "String"), }) @PutMapping(value = "/{code}") @ResponseStatus(HttpStatus.OK) @ApiException(UPDATE_PROJECT_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result updateProject(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @PathVariable("code") Long code, @RequestParam("projectName") String projectName, @RequestParam(value = "description", required = false) String description, @RequestParam(value = "userName") String userName) { Map<String, Object> result = projectService.update(loginUser, code, projectName, description, userName); return returnDataList(result); } /** * query project details by code * * @param loginUser login user * @param code project code * @return project detail information */ @ApiOperation(value = "queryProjectByCode", notes = "QUERY_PROJECT_BY_ID_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "code", value = "PROJECT_CODE", dataType = "Long", example = "123456") }) @GetMapping(value = "/{code}") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_PROJECT_DETAILS_BY_CODE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryProjectByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @PathVariable("code") long code) { Map<String, Object> result = projectService.queryByCode(loginUser, code); return returnDataList(result); } /** * query project list paging * * @param loginUser login user * @param searchVal search value * @param pageSize page size * @param pageNo page number * @return project list which the login user have permission to see */ @ApiOperation(value = "queryProjectListPaging", notes = "QUERY_PROJECT_LIST_PAGING_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType = "String"), @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", required = true, dataType = "Int", example = "10"), @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", required = true, dataType = "Int", example = "1") }) @GetMapping() @ResponseStatus(HttpStatus.OK) @ApiException(LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryProjectListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "searchVal", required = false) String searchVal, @RequestParam("pageSize") Integer pageSize, @RequestParam("pageNo") Integer 
pageNo ) { Result result = checkPageParams(pageNo, pageSize); if (!result.checkResult()) { return result; } searchVal = ParameterUtils.handleEscapes(searchVal); result = projectService.queryProjectListPaging(loginUser, pageSize, pageNo, searchVal); return result; } /** * delete project by code * * @param loginUser login user * @param code project code * @return delete result code */ @ApiOperation(value = "delete", notes = "DELETE_PROJECT_BY_ID_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "code", value = "PROJECT_CODE", dataType = "Long", example = "123456") }) @DeleteMapping(value = "/{code}") @ResponseStatus(HttpStatus.OK) @ApiException(DELETE_PROJECT_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result deleteProject(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @PathVariable("code") Long code) { Map<String, Object> result = projectService.deleteProject(loginUser, code); return returnDataList(result); } /** * query unauthorized project * * @param loginUser login user * @param userId user id * @return the projects which user have not permission to see */ @ApiOperation(value = "queryUnauthorizedProject", notes = "QUERY_UNAUTHORIZED_PROJECT_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userId", value = "USER_ID", dataType = "Int", example = "100") }) @GetMapping(value = "/unauth") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_UNAUTHORIZED_PROJECT_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryUnauthorizedProject(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("userId") Integer userId) { Map<String, Object> result = projectService.queryUnauthorizedProject(loginUser, userId); return returnDataList(result); } /** * query authorized project * * @param loginUser login user * @param userId user id * @return projects which the user have permission to see, Except for items created by this user */ @ApiOperation(value = "queryAuthorizedProject", notes = "QUERY_AUTHORIZED_PROJECT_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "userId", value = "USER_ID", dataType = "Int", example = "100") }) @GetMapping(value = "/authed") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_AUTHORIZED_PROJECT) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryAuthorizedProject(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("userId") Integer userId) { Map<String, Object> result = projectService.queryAuthorizedProject(loginUser, userId); return returnDataList(result); } /** * query authorized and user created project * * @param loginUser login user * @return projects which the user create and authorized */ @ApiOperation(value = "queryProjectCreatedAndAuthorizedByUser", notes = "QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_NOTES") @GetMapping(value = "/created-and-authed") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryProjectCreatedAndAuthorizedByUser(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) { Map<String, Object> result = projectService.queryProjectCreatedAndAuthorizedByUser(loginUser); return returnDataList(result); } /** * query all project list * * @param loginUser login user * @return all project list */ @ApiOperation(value = "queryAllProjectList", notes = "QUERY_ALL_PROJECT_LIST_NOTES") @GetMapping(value = "/list") @ResponseStatus(HttpStatus.OK) 
@ApiException(LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryAllProjectList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser) { Map<String, Object> result = projectService.queryAllProjectList(); return returnDataList(result); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,135
[Bug][API] The interface of 'authed project' is missing.
branch: [dev] The "authed-project" interface in the API module is missing. ![image](https://user-images.githubusercontent.com/29528966/132490379-e40c3df0-74c0-4bf8-8f5b-adfbbff96666.png)
https://github.com/apache/dolphinscheduler/issues/6135
https://github.com/apache/dolphinscheduler/pull/6136
9d7f70bca777177c14efb3d32e469a54951f4a6b
edb8f3c435a052bdd622b5a9a7c4ce995f5705b5
"2021-09-08T10:09:49Z"
java
"2021-09-08T12:47:54Z"
sql/dolphinscheduler_mysql.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET FOREIGN_KEY_CHECKS=0; -- ---------------------------- -- Table structure for QRTZ_BLOB_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`; CREATE TABLE `QRTZ_BLOB_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `BLOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_BLOB_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CALENDARS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CALENDARS`; CREATE TABLE `QRTZ_CALENDARS` ( `SCHED_NAME` varchar(120) NOT NULL, `CALENDAR_NAME` varchar(200) NOT NULL, `CALENDAR` blob NOT NULL, PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CALENDARS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CRON_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`; CREATE TABLE `QRTZ_CRON_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `CRON_EXPRESSION` varchar(120) NOT NULL, `TIME_ZONE_ID` varchar(80) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CRON_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_FIRED_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`; CREATE TABLE `QRTZ_FIRED_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `ENTRY_ID` varchar(200) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `FIRED_TIME` bigint(13) NOT NULL, `SCHED_TIME` bigint(13) NOT NULL, `PRIORITY` int(11) NOT NULL, `STATE` varchar(16) NOT NULL, `JOB_NAME` varchar(200) DEFAULT NULL, `JOB_GROUP` varchar(200) DEFAULT NULL, `IS_NONCONCURRENT` varchar(1) DEFAULT NULL, `REQUESTS_RECOVERY` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`), KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`), KEY 
`IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_FIRED_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_JOB_DETAILS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`; CREATE TABLE `QRTZ_JOB_DETAILS` ( `SCHED_NAME` varchar(120) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `JOB_CLASS_NAME` varchar(250) NOT NULL, `IS_DURABLE` varchar(1) NOT NULL, `IS_NONCONCURRENT` varchar(1) NOT NULL, `IS_UPDATE_DATA` varchar(1) NOT NULL, `REQUESTS_RECOVERY` varchar(1) NOT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_JOB_DETAILS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_LOCKS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_LOCKS`; CREATE TABLE `QRTZ_LOCKS` ( `SCHED_NAME` varchar(120) NOT NULL, `LOCK_NAME` varchar(40) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_LOCKS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`; CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SCHEDULER_STATE -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`; CREATE TABLE `QRTZ_SCHEDULER_STATE` ( `SCHED_NAME` varchar(120) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `LAST_CHECKIN_TIME` bigint(13) NOT NULL, `CHECKIN_INTERVAL` bigint(13) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SCHEDULER_STATE -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPLE_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`; CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `REPEAT_COUNT` bigint(7) NOT NULL, `REPEAT_INTERVAL` bigint(12) NOT NULL, `TIMES_TRIGGERED` bigint(10) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPLE_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPROP_TRIGGERS -- 
---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`; CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `STR_PROP_1` varchar(512) DEFAULT NULL, `STR_PROP_2` varchar(512) DEFAULT NULL, `STR_PROP_3` varchar(512) DEFAULT NULL, `INT_PROP_1` int(11) DEFAULT NULL, `INT_PROP_2` int(11) DEFAULT NULL, `LONG_PROP_1` bigint(20) DEFAULT NULL, `LONG_PROP_2` bigint(20) DEFAULT NULL, `DEC_PROP_1` decimal(13,4) DEFAULT NULL, `DEC_PROP_2` decimal(13,4) DEFAULT NULL, `BOOL_PROP_1` varchar(1) DEFAULT NULL, `BOOL_PROP_2` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPROP_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_TRIGGERS`; CREATE TABLE `QRTZ_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `NEXT_FIRE_TIME` bigint(13) DEFAULT NULL, `PREV_FIRE_TIME` bigint(13) DEFAULT NULL, `PRIORITY` int(11) DEFAULT NULL, `TRIGGER_STATE` varchar(16) NOT NULL, `TRIGGER_TYPE` varchar(8) NOT NULL, `START_TIME` bigint(13) NOT NULL, `END_TIME` bigint(13) DEFAULT NULL, `CALENDAR_NAME` varchar(200) DEFAULT NULL, `MISFIRE_INSTR` smallint(2) DEFAULT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`), KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_access_token -- ---------------------------- DROP TABLE IF EXISTS `t_ds_access_token`; CREATE TABLE `t_ds_access_token` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `token` varchar(64) DEFAULT NULL COMMENT 'token', `expire_time` datetime DEFAULT NULL COMMENT 'end time of token ', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB 
AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_access_token -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alert -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert`; CREATE TABLE `t_ds_alert` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `title` varchar(64) DEFAULT NULL COMMENT 'title', `content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)', `alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed', `log` text COMMENT 'log', `alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alert -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alertgroup -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alertgroup`; CREATE TABLE `t_ds_alertgroup`( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids', `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id', `group_name` varchar(255) DEFAULT NULL COMMENT 'group name', `description` varchar(255) DEFAULT NULL, `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_alertgroup_name_un` (`group_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_command`; CREATE TABLE `t_ds_command` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread', `process_definition_code` bigint(20) DEFAULT NULL COMMENT 'process definition code', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_command -- ---------------------------- -- 
---------------------------- -- Table structure for t_ds_datasource -- ---------------------------- DROP TABLE IF EXISTS `t_ds_datasource`; CREATE TABLE `t_ds_datasource` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(64) NOT NULL COMMENT 'data source name', `note` varchar(255) DEFAULT NULL COMMENT 'description', `type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark', `user_id` int(11) NOT NULL COMMENT 'the creator id', `connection_params` text NOT NULL COMMENT 'json connection params', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_datasource -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_error_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_error_command`; CREATE TABLE `t_ds_error_command` ( `id` int(11) NOT NULL COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `process_definition_code` bigint(20) DEFAULT NULL COMMENT 'process definition code', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `message` text COMMENT 'message', PRIMARY KEY (`id`) USING BTREE ) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC; -- ---------------------------- -- Records of t_ds_error_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition`; CREATE TABLE `t_ds_process_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(255) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out, unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`), UNIQUE KEY `process_unique` 
(`name`,`project_code`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_process_definition -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition_log`; CREATE TABLE `t_ds_process_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out,unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_definition`; CREATE TABLE `t_ds_task_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT NULL COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` longtext COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`), UNIQUE KEY `task_unique` (`name`,`project_code`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition_log -- ---------------------------- DROP TABLE IF EXISTS 
`t_ds_task_definition_log`; CREATE TABLE `t_ds_task_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT NULL COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` text COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation`; CREATE TABLE `t_ds_process_task_relation` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task version', `post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation_log`; CREATE TABLE `t_ds_process_task_relation_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task version', 
`post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_instance`; CREATE TABLE `t_ds_process_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'process instance name', `process_definition_version` int(11) DEFAULT NULL COMMENT 'process definition version', `process_definition_code` bigint(20) not NULL COMMENT 'process definition code', `state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance', `start_time` datetime DEFAULT NULL COMMENT 'process instance start time', `end_time` datetime DEFAULT NULL COMMENT 'process instance end time', `run_times` int(11) DEFAULT NULL COMMENT 'process instance run times', `host` varchar(135) DEFAULT NULL COMMENT 'process instance host', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes', `max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 0:no warning,1:warning if process success,2:warning if process failed,3:warning if success', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `command_start_time` datetime DEFAULT NULL COMMENT 'command start time', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT '1' COMMENT 'flag', `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process', `executor_id` int(11) NOT NULL COMMENT 'executor id', `history_cmd` text COMMENT 'history commands of process instance operation', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 
0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `var_pool` longtext COMMENT 'var_pool', PRIMARY KEY (`id`), KEY `process_instance_index` (`process_definition_code`,`id`) USING BTREE, KEY `start_time_index` (`start_time`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_project -- ---------------------------- DROP TABLE IF EXISTS `t_ds_project`; CREATE TABLE `t_ds_project` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(100) DEFAULT NULL COMMENT 'project name', `code` bigint(20) NOT NULL COMMENT 'encoding', `description` varchar(200) DEFAULT NULL, `user_id` int(11) DEFAULT NULL COMMENT 'creator id', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `user_id_index` (`user_id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_project -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_queue -- ---------------------------- DROP TABLE IF EXISTS `t_ds_queue`; CREATE TABLE `t_ds_queue` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name', `queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_queue -- ---------------------------- INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null); -- ---------------------------- -- Table structure for t_ds_relation_datasource_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_datasource_user`; CREATE TABLE `t_ds_relation_datasource_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `datasource_id` int(11) DEFAULT NULL COMMENT 'data source id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_datasource_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_process_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_process_instance`; CREATE TABLE `t_ds_relation_process_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id', `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id', `process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_process_instance -- ---------------------------- -- ---------------------------- -- Table 
structure for t_ds_relation_project_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_project_user`; CREATE TABLE `t_ds_relation_project_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `project_id` int(11) DEFAULT NULL COMMENT 'project id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `user_id_index` (`user_id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_project_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_resources_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_resources_user`; CREATE TABLE `t_ds_relation_resources_user` ( `id` int(11) NOT NULL AUTO_INCREMENT, `user_id` int(11) NOT NULL COMMENT 'user id', `resources_id` int(11) DEFAULT NULL COMMENT 'resource id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_resources_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_udfs_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_udfs_user`; CREATE TABLE `t_ds_relation_udfs_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'userid', `udf_id` int(11) DEFAULT NULL COMMENT 'udf id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_resources -- ---------------------------- DROP TABLE IF EXISTS `t_ds_resources`; CREATE TABLE `t_ds_resources` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `alias` varchar(64) DEFAULT NULL COMMENT 'alias', `file_name` varchar(64) DEFAULT NULL COMMENT 'file name', `description` varchar(255) DEFAULT NULL, `user_id` int(11) DEFAULT NULL COMMENT 'user id', `type` tinyint(4) DEFAULT NULL COMMENT 'resource type,0:FILE,1:UDF', `size` bigint(20) DEFAULT NULL COMMENT 'resource size', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `pid` int(11) DEFAULT NULL, `full_name` varchar(64) DEFAULT NULL, `is_directory` tinyint(4) DEFAULT NULL, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_resources_un` (`full_name`,`type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_resources -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_schedules -- ---------------------------- DROP TABLE IF EXISTS `t_ds_schedules`; CREATE TABLE `t_ds_schedules` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `start_time` datetime NOT NULL COMMENT 'start time', `end_time` datetime NOT NULL COMMENT 'end time', `timezone_id` varchar(40) DEFAULT NULL COMMENT 'timezoneId', `crontab` varchar(255) NOT NULL COMMENT 'crontab 
description', `failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue', `user_id` int(11) NOT NULL COMMENT 'user id', `release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ', `warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT '' COMMENT 'worker group id', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_schedules -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_session -- ---------------------------- DROP TABLE IF EXISTS `t_ds_session`; CREATE TABLE `t_ds_session` ( `id` varchar(64) NOT NULL COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `ip` varchar(45) DEFAULT NULL COMMENT 'ip', `last_login_time` datetime DEFAULT NULL COMMENT 'last login time', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_session -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_task_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_instance`; CREATE TABLE `t_ds_task_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'task name', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_code` bigint(20) NOT NULL COMMENT 'task definition code', `task_definition_version` int(11) DEFAULT NULL COMMENT 'task definition version', `process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id', `state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `submit_time` datetime DEFAULT NULL COMMENT 'task submit time', `start_time` datetime DEFAULT NULL COMMENT 'task start time', `end_time` datetime DEFAULT NULL COMMENT 'task end time', `host` varchar(135) DEFAULT NULL COMMENT 'host of task running on', `execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host', `log_path` varchar(200) DEFAULT NULL COMMENT 'task log path', `alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert', `retry_times` int(4) DEFAULT '0' COMMENT 'task retry times', `pid` int(4) DEFAULT NULL COMMENT 'pid of task', `app_link` text COMMENT 'yarn app id', `task_params` text COMMENT 'job custom parameters', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ', `max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times', `task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `environment_config` text COMMENT 'this config contains many
environment variables config', `executor_id` int(11) DEFAULT NULL, `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time', `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time', `var_pool` longtext COMMENT 'var_pool', PRIMARY KEY (`id`), KEY `process_instance_id` (`process_instance_id`) USING BTREE, CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_task_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_tenant -- ---------------------------- DROP TABLE IF EXISTS `t_ds_tenant`; CREATE TABLE `t_ds_tenant` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code', `description` varchar(255) DEFAULT NULL, `queue_id` int(11) DEFAULT NULL COMMENT 'queue id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_tenant -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_udfs -- ---------------------------- DROP TABLE IF EXISTS `t_ds_udfs`; CREATE TABLE `t_ds_udfs` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `func_name` varchar(100) NOT NULL COMMENT 'UDF function name', `class_name` varchar(255) NOT NULL COMMENT 'class of udf', `type` tinyint(4) NOT NULL COMMENT 'Udf function type', `arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types', `database` varchar(255) DEFAULT NULL COMMENT 'data base', `description` varchar(255) DEFAULT NULL, `resource_id` int(11) NOT NULL COMMENT 'resource id', `resource_name` varchar(255) NOT NULL COMMENT 'resource name', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_udfs -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_user`; CREATE TABLE `t_ds_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id', `user_name` varchar(64) DEFAULT NULL COMMENT 'user name', `user_password` varchar(64) DEFAULT NULL COMMENT 'user password', `user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user', `email` varchar(64) DEFAULT NULL COMMENT 'email', `phone` varchar(11) DEFAULT NULL COMMENT 'phone', `tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `queue` varchar(64) DEFAULT NULL COMMENT 'queue', `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable', PRIMARY KEY (`id`), UNIQUE KEY `user_name_unique` (`user_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_worker_group -- ---------------------------- DROP TABLE IF EXISTS `t_ds_worker_group`; CREATE TABLE `t_ds_worker_group` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `name` varchar(255) NOT NULL COMMENT 
'worker group name', `addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]', `create_time` datetime NULL DEFAULT NULL COMMENT 'create time', `update_time` datetime NULL DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `name_unique` (`name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_worker_group -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_version -- ---------------------------- DROP TABLE IF EXISTS `t_ds_version`; CREATE TABLE `t_ds_version` ( `id` int(11) NOT NULL AUTO_INCREMENT, `version` varchar(200) NOT NULL, PRIMARY KEY (`id`), UNIQUE KEY `version_UNIQUE` (`version`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version'; -- ---------------------------- -- Records of t_ds_version -- ---------------------------- INSERT INTO `t_ds_version` VALUES ('1', '1.4.0'); -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- INSERT INTO `t_ds_alertgroup`(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ("1,2", 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- ---------------------------- -- Records of t_ds_user -- ---------------------------- INSERT INTO `t_ds_user` VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1); -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY','')); DROP TABLE IF EXISTS `t_ds_plugin_define`; CREATE TABLE `t_ds_plugin_define` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email', `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin', `plugin_params` text COMMENT 'plugin params', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`; CREATE TABLE `t_ds_alert_plugin_instance` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_define_id` int NOT NULL, `plugin_instance_params` text COMMENT 'plugin instance params. 
Also contain the params value which user input in web ui.', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_environment -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment`; CREATE TABLE `t_ds_environment` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `code` bigint(20) DEFAULT NULL COMMENT 'encoding', `name` varchar(100) NOT NULL COMMENT 'environment name', `config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config', `description` text NULL DEFAULT NULL COMMENT 'the details', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_name_unique` (`name`), UNIQUE KEY `environment_code_unique` (`code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_environment_worker_group_relation -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment_worker_group_relation`; CREATE TABLE `t_ds_environment_worker_group_relation` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `worker_group` varchar(255) NOT NULL COMMENT 'worker group id', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,138
[Bug][sql] Create t_ds_task_instance mysql table error
**Describe the bug** 1. ~~When I create the `t_ds_task_instance` table, it throws an exception.~~ https://github.com/apache/dolphinscheduler/blob/9d7f70bca777177c14efb3d32e469a54951f4a6b/sql/dolphinscheduler_mysql.sql#L819 ``` [2021-09-08 20:09:39] [42000][1101] BLOB, TEXT, GEOMETRY or JSON column 'environment_config' can't have a default value ``` We cannot set a default value on a TEXT column in MySQL: https://dev.mysql.com/doc/refman/8.0/en/blob.html ![image](https://user-images.githubusercontent.com/22415594/132508865-8e177ba3-9fa9-4e1f-bd4a-d8f26f03f7bd.png) 2. ~~We also need to remove the extra comma in line 991.~~ https://github.com/apache/dolphinscheduler/blob/9d7f70bca777177c14efb3d32e469a54951f4a6b/sql/dolphinscheduler_mysql.sql#L991 3. In other tables such as `t_ds_task_definition` and `t_ds_task_definition_log`, the `environment_code` column should default to -1; it is currently NOT NULL with no default, which is unreasonable. The program code appears to use -1 as the default value, but I am not sure. 4. Add an `environment_code` column to the `t_ds_process_instance` table. **Which version of Dolphin Scheduler:** -[dev]
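The report above pins down the root cause: MySQL rejects any non-NULL literal default on a BLOB, TEXT, GEOMETRY, or JSON column (error 1101). Below is a minimal sketch of what the corrected column shapes look like; the table name `demo_task_instance` is hypothetical, and the -1 default for `environment_code` is the value the reporter believes the program code assumes, not a confirmed constant.

```sql
-- Fails in MySQL with error 1101: TEXT columns cannot carry a literal default.
-- CREATE TABLE demo_bad ( `environment_config` text DEFAULT '' );

-- Working sketch (hypothetical table name): drop the DEFAULT clause on the
-- TEXT column, and give `environment_code` an explicit -1 default so inserts
-- that omit it still succeed. An explicit DEFAULT NULL remains legal on TEXT.
CREATE TABLE `demo_task_instance` (
  `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key',
  `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code, -1 assumed to mean none',
  `environment_config` text COMMENT 'environment variables config; no literal DEFAULT allowed on TEXT',
  PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
```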
https://github.com/apache/dolphinscheduler/issues/6138
https://github.com/apache/dolphinscheduler/pull/6140
71cb6690916e6c3947665d4c5d08c83d06fd2202
dd5acfb7f0a19b6cac16ee299801adaba1fdfdfa
"2021-09-08T12:26:13Z"
java
"2021-09-09T02:43:34Z"
sql/dolphinscheduler_mysql.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET FOREIGN_KEY_CHECKS=0; -- ---------------------------- -- Table structure for QRTZ_BLOB_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_BLOB_TRIGGERS`; CREATE TABLE `QRTZ_BLOB_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `BLOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `SCHED_NAME` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_BLOB_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_BLOB_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CALENDARS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CALENDARS`; CREATE TABLE `QRTZ_CALENDARS` ( `SCHED_NAME` varchar(120) NOT NULL, `CALENDAR_NAME` varchar(200) NOT NULL, `CALENDAR` blob NOT NULL, PRIMARY KEY (`SCHED_NAME`,`CALENDAR_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CALENDARS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CRON_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_CRON_TRIGGERS`; CREATE TABLE `QRTZ_CRON_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `CRON_EXPRESSION` varchar(120) NOT NULL, `TIME_ZONE_ID` varchar(80) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_CRON_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_CRON_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_FIRED_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_FIRED_TRIGGERS`; CREATE TABLE `QRTZ_FIRED_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `ENTRY_ID` varchar(200) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `FIRED_TIME` bigint(13) NOT NULL, `SCHED_TIME` bigint(13) NOT NULL, `PRIORITY` int(11) NOT NULL, `STATE` varchar(16) NOT NULL, `JOB_NAME` varchar(200) DEFAULT NULL, `JOB_GROUP` varchar(200) DEFAULT NULL, `IS_NONCONCURRENT` varchar(1) DEFAULT NULL, `REQUESTS_RECOVERY` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`ENTRY_ID`), KEY `IDX_QRTZ_FT_TRIG_INST_NAME` (`SCHED_NAME`,`INSTANCE_NAME`), KEY 
`IDX_QRTZ_FT_INST_JOB_REQ_RCVRY` (`SCHED_NAME`,`INSTANCE_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_FT_J_G` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_FT_T_G` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_FT_TG` (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_FIRED_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_JOB_DETAILS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_JOB_DETAILS`; CREATE TABLE `QRTZ_JOB_DETAILS` ( `SCHED_NAME` varchar(120) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `JOB_CLASS_NAME` varchar(250) NOT NULL, `IS_DURABLE` varchar(1) NOT NULL, `IS_NONCONCURRENT` varchar(1) NOT NULL, `IS_UPDATE_DATA` varchar(1) NOT NULL, `REQUESTS_RECOVERY` varchar(1) NOT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_J_REQ_RECOVERY` (`SCHED_NAME`,`REQUESTS_RECOVERY`), KEY `IDX_QRTZ_J_GRP` (`SCHED_NAME`,`JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_JOB_DETAILS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_LOCKS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_LOCKS`; CREATE TABLE `QRTZ_LOCKS` ( `SCHED_NAME` varchar(120) NOT NULL, `LOCK_NAME` varchar(40) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`LOCK_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_LOCKS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_PAUSED_TRIGGER_GRPS`; CREATE TABLE `QRTZ_PAUSED_TRIGGER_GRPS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SCHEDULER_STATE -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SCHEDULER_STATE`; CREATE TABLE `QRTZ_SCHEDULER_STATE` ( `SCHED_NAME` varchar(120) NOT NULL, `INSTANCE_NAME` varchar(200) NOT NULL, `LAST_CHECKIN_TIME` bigint(13) NOT NULL, `CHECKIN_INTERVAL` bigint(13) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`INSTANCE_NAME`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SCHEDULER_STATE -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPLE_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPLE_TRIGGERS`; CREATE TABLE `QRTZ_SIMPLE_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `REPEAT_COUNT` bigint(7) NOT NULL, `REPEAT_INTERVAL` bigint(12) NOT NULL, `TIMES_TRIGGERED` bigint(10) NOT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPLE_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPLE_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPROP_TRIGGERS -- 
---------------------------- DROP TABLE IF EXISTS `QRTZ_SIMPROP_TRIGGERS`; CREATE TABLE `QRTZ_SIMPROP_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `STR_PROP_1` varchar(512) DEFAULT NULL, `STR_PROP_2` varchar(512) DEFAULT NULL, `STR_PROP_3` varchar(512) DEFAULT NULL, `INT_PROP_1` int(11) DEFAULT NULL, `INT_PROP_2` int(11) DEFAULT NULL, `LONG_PROP_1` bigint(20) DEFAULT NULL, `LONG_PROP_2` bigint(20) DEFAULT NULL, `DEC_PROP_1` decimal(13,4) DEFAULT NULL, `DEC_PROP_2` decimal(13,4) DEFAULT NULL, `BOOL_PROP_1` varchar(1) DEFAULT NULL, `BOOL_PROP_2` varchar(1) DEFAULT NULL, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), CONSTRAINT `QRTZ_SIMPROP_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) REFERENCES `QRTZ_TRIGGERS` (`SCHED_NAME`, `TRIGGER_NAME`, `TRIGGER_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_SIMPROP_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS `QRTZ_TRIGGERS`; CREATE TABLE `QRTZ_TRIGGERS` ( `SCHED_NAME` varchar(120) NOT NULL, `TRIGGER_NAME` varchar(200) NOT NULL, `TRIGGER_GROUP` varchar(200) NOT NULL, `JOB_NAME` varchar(200) NOT NULL, `JOB_GROUP` varchar(200) NOT NULL, `DESCRIPTION` varchar(250) DEFAULT NULL, `NEXT_FIRE_TIME` bigint(13) DEFAULT NULL, `PREV_FIRE_TIME` bigint(13) DEFAULT NULL, `PRIORITY` int(11) DEFAULT NULL, `TRIGGER_STATE` varchar(16) NOT NULL, `TRIGGER_TYPE` varchar(8) NOT NULL, `START_TIME` bigint(13) NOT NULL, `END_TIME` bigint(13) DEFAULT NULL, `CALENDAR_NAME` varchar(200) DEFAULT NULL, `MISFIRE_INSTR` smallint(2) DEFAULT NULL, `JOB_DATA` blob, PRIMARY KEY (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_J` (`SCHED_NAME`,`JOB_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_JG` (`SCHED_NAME`,`JOB_GROUP`), KEY `IDX_QRTZ_T_C` (`SCHED_NAME`,`CALENDAR_NAME`), KEY `IDX_QRTZ_T_G` (`SCHED_NAME`,`TRIGGER_GROUP`), KEY `IDX_QRTZ_T_STATE` (`SCHED_NAME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_STATE` (`SCHED_NAME`,`TRIGGER_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_N_G_STATE` (`SCHED_NAME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NEXT_FIRE_TIME` (`SCHED_NAME`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST` (`SCHED_NAME`,`TRIGGER_STATE`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_STATE`), KEY `IDX_QRTZ_T_NFT_ST_MISFIRE_GRP` (`SCHED_NAME`,`MISFIRE_INSTR`,`NEXT_FIRE_TIME`,`TRIGGER_GROUP`,`TRIGGER_STATE`), CONSTRAINT `QRTZ_TRIGGERS_ibfk_1` FOREIGN KEY (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) REFERENCES `QRTZ_JOB_DETAILS` (`SCHED_NAME`, `JOB_NAME`, `JOB_GROUP`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of QRTZ_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_access_token -- ---------------------------- DROP TABLE IF EXISTS `t_ds_access_token`; CREATE TABLE `t_ds_access_token` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `token` varchar(64) DEFAULT NULL COMMENT 'token', `expire_time` datetime DEFAULT NULL COMMENT 'end time of token ', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB 
AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_access_token -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alert -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert`; CREATE TABLE `t_ds_alert` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `title` varchar(64) DEFAULT NULL COMMENT 'title', `content` text COMMENT 'Message content (can be email, can be SMS. Mail is stored in JSON map, and SMS is string)', `alert_status` tinyint(4) DEFAULT '0' COMMENT '0:wait running,1:success,2:failed', `log` text COMMENT 'log', `alertgroup_id` int(11) DEFAULT NULL COMMENT 'alert group id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alert -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alertgroup -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alertgroup`; CREATE TABLE `t_ds_alertgroup`( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids', `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id', `group_name` varchar(255) DEFAULT NULL COMMENT 'group name', `description` varchar(255) DEFAULT NULL, `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_alertgroup_name_un` (`group_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_command`; CREATE TABLE `t_ds_command` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread', `process_definition_code` bigint(20) DEFAULT NULL COMMENT 'process definition code', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'Node dependency type: 0 current node, 1 forward, 2 backward', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'Failed policy: 0 end, 1 continue', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_command -- ---------------------------- -- 
---------------------------- -- Table structure for t_ds_datasource -- ---------------------------- DROP TABLE IF EXISTS `t_ds_datasource`; CREATE TABLE `t_ds_datasource` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(64) NOT NULL COMMENT 'data source name', `note` varchar(255) DEFAULT NULL COMMENT 'description', `type` tinyint(4) NOT NULL COMMENT 'data source type: 0:mysql,1:postgresql,2:hive,3:spark', `user_id` int(11) NOT NULL COMMENT 'the creator id', `connection_params` text NOT NULL COMMENT 'json connection params', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_datasource -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_error_command -- ---------------------------- DROP TABLE IF EXISTS `t_ds_error_command`; CREATE TABLE `t_ds_error_command` ( `id` int(11) NOT NULL COMMENT 'key', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `executor_id` int(11) DEFAULT NULL COMMENT 'executor id', `process_definition_code` bigint(20) DEFAULT NULL COMMENT 'process definition code', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'scheduler time', `start_time` datetime DEFAULT NULL COMMENT 'start time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority, 0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) COMMENT 'worker group', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `message` text COMMENT 'message', PRIMARY KEY (`id`) USING BTREE ) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC; -- ---------------------------- -- Records of t_ds_error_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition`; CREATE TABLE `t_ds_process_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(255) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out, unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`), UNIQUE KEY `process_unique` 
(`name`,`project_code`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_process_definition -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_definition_log`; CREATE TABLE `t_ds_process_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'process definition name', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `release_state` tinyint(4) DEFAULT NULL COMMENT 'process definition release state:0:offline,1:online', `user_id` int(11) DEFAULT NULL COMMENT 'process definition creator id', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT NULL COMMENT '0 not available, 1 available', `locations` text COMMENT 'Node location information', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `timeout` int(11) DEFAULT '0' COMMENT 'time out,unit: minute', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_definition`; CREATE TABLE `t_ds_task_definition` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT NULL COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` longtext COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`,`code`), UNIQUE KEY `task_unique` (`name`,`project_code`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_task_definition_log -- ---------------------------- DROP TABLE IF 
EXISTS `t_ds_task_definition_log`; CREATE TABLE `t_ds_task_definition_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `code` bigint(20) NOT NULL COMMENT 'encoding', `name` varchar(200) DEFAULT NULL COMMENT 'task definition name', `version` int(11) DEFAULT NULL COMMENT 'task definition version', `description` text COMMENT 'description', `project_code` bigint(20) NOT NULL COMMENT 'project code', `user_id` int(11) DEFAULT NULL COMMENT 'task definition creator id', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_params` text COMMENT 'job custom parameters', `flag` tinyint(2) DEFAULT NULL COMMENT '0 not available, 1 available', `task_priority` tinyint(4) DEFAULT NULL COMMENT 'job priority', `worker_group` varchar(200) DEFAULT NULL COMMENT 'worker grouping', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `fail_retry_times` int(11) DEFAULT NULL COMMENT 'number of failed retries', `fail_retry_interval` int(11) DEFAULT NULL COMMENT 'failed retry interval', `timeout_flag` tinyint(2) DEFAULT '0' COMMENT 'timeout flag:0 close, 1 open', `timeout_notify_strategy` tinyint(4) DEFAULT NULL COMMENT 'timeout notification policy: 0 warning, 1 fail', `timeout` int(11) DEFAULT '0' COMMENT 'timeout length,unit: minute', `delay_time` int(11) DEFAULT '0' COMMENT 'delay execution time,unit: minute', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource id, separated by comma', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation`; CREATE TABLE `t_ds_process_task_relation` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task version', `post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_task_relation_log -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_task_relation_log`; CREATE TABLE `t_ds_process_task_relation_log` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'self-increasing id', `name` varchar(200) DEFAULT NULL COMMENT 'relation name', `process_definition_version` int(11) DEFAULT NULL COMMENT 'process version', `project_code` bigint(20) NOT NULL COMMENT 'project code', `process_definition_code` bigint(20) NOT NULL COMMENT 'process code', `pre_task_code` bigint(20) NOT NULL COMMENT 'pre task code', `pre_task_version` int(11) NOT NULL COMMENT 'pre task 
version', `post_task_code` bigint(20) NOT NULL COMMENT 'post task code', `post_task_version` int(11) NOT NULL COMMENT 'post task version', `condition_type` tinyint(2) DEFAULT NULL COMMENT 'condition type : 0 none, 1 judge 2 delay', `condition_params` text COMMENT 'condition params(json)', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `operate_time` datetime DEFAULT NULL COMMENT 'operate time', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_process_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_process_instance`; CREATE TABLE `t_ds_process_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'process instance name', `process_definition_version` int(11) DEFAULT NULL COMMENT 'process definition version', `process_definition_code` bigint(20) not NULL COMMENT 'process definition code', `state` tinyint(4) DEFAULT NULL COMMENT 'process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `recovery` tinyint(4) DEFAULT NULL COMMENT 'process instance failover flag:0:normal,1:failover instance', `start_time` datetime DEFAULT NULL COMMENT 'process instance start time', `end_time` datetime DEFAULT NULL COMMENT 'process instance end time', `run_times` int(11) DEFAULT NULL COMMENT 'process instance run times', `host` varchar(135) DEFAULT NULL COMMENT 'process instance host', `command_type` tinyint(4) DEFAULT NULL COMMENT 'command type', `command_param` text COMMENT 'json command parameters', `task_depend_type` tinyint(4) DEFAULT NULL COMMENT 'task depend type. 0: only current node,1:before the node,2:later nodes', `max_try_times` tinyint(4) DEFAULT '0' COMMENT 'max try times', `failure_strategy` tinyint(4) DEFAULT '0' COMMENT 'failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed', `warning_type` tinyint(4) DEFAULT '0' COMMENT 'warning type. 0:no warning,1:warning if process success,2:warning if process failed,3:warning if success', `warning_group_id` int(11) DEFAULT NULL COMMENT 'warning group id', `schedule_time` datetime DEFAULT NULL COMMENT 'schedule time', `command_start_time` datetime DEFAULT NULL COMMENT 'command start time', `global_params` text COMMENT 'global parameters', `flag` tinyint(4) DEFAULT '1' COMMENT 'flag', `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `is_sub_process` int(11) DEFAULT '0' COMMENT 'flag, whether the process is sub process', `executor_id` int(11) NOT NULL COMMENT 'executor id', `history_cmd` text COMMENT 'history commands of process instance operation', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority. 
0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `timeout` int(11) DEFAULT '0' COMMENT 'time out', `tenant_id` int(11) NOT NULL DEFAULT '-1' COMMENT 'tenant id', `var_pool` longtext COMMENT 'var_pool', PRIMARY KEY (`id`), KEY `process_instance_index` (`process_definition_code`,`id`) USING BTREE, KEY `start_time_index` (`start_time`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_project -- ---------------------------- DROP TABLE IF EXISTS `t_ds_project`; CREATE TABLE `t_ds_project` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(100) DEFAULT NULL COMMENT 'project name', `code` bigint(20) NOT NULL COMMENT 'encoding', `description` varchar(200) DEFAULT NULL, `user_id` int(11) DEFAULT NULL COMMENT 'creator id', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `user_id_index` (`user_id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_project -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_queue -- ---------------------------- DROP TABLE IF EXISTS `t_ds_queue`; CREATE TABLE `t_ds_queue` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `queue_name` varchar(64) DEFAULT NULL COMMENT 'queue name', `queue` varchar(64) DEFAULT NULL COMMENT 'yarn queue name', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_queue -- ---------------------------- INSERT INTO `t_ds_queue` VALUES ('1', 'default', 'default', null, null); -- ---------------------------- -- Table structure for t_ds_relation_datasource_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_datasource_user`; CREATE TABLE `t_ds_relation_datasource_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `datasource_id` int(11) DEFAULT NULL COMMENT 'data source id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_datasource_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_process_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_process_instance`; CREATE TABLE `t_ds_relation_process_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `parent_process_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id', `parent_task_instance_id` int(11) DEFAULT NULL COMMENT 'parent process instance id', `process_instance_id` int(11) DEFAULT NULL COMMENT 'child process instance id', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_process_instance -- 
---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_project_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_project_user`; CREATE TABLE `t_ds_relation_project_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `project_id` int(11) DEFAULT NULL COMMENT 'project id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), KEY `user_id_index` (`user_id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_project_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_resources_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_resources_user`; CREATE TABLE `t_ds_relation_resources_user` ( `id` int(11) NOT NULL AUTO_INCREMENT, `user_id` int(11) NOT NULL COMMENT 'user id', `resources_id` int(11) DEFAULT NULL COMMENT 'resource id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_relation_resources_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_udfs_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_relation_udfs_user`; CREATE TABLE `t_ds_relation_udfs_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'userid', `udf_id` int(11) DEFAULT NULL COMMENT 'udf id', `perm` int(11) DEFAULT '1' COMMENT 'limits of authority', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_resources -- ---------------------------- DROP TABLE IF EXISTS `t_ds_resources`; CREATE TABLE `t_ds_resources` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `alias` varchar(64) DEFAULT NULL COMMENT 'alias', `file_name` varchar(64) DEFAULT NULL COMMENT 'file name', `description` varchar(255) DEFAULT NULL, `user_id` int(11) DEFAULT NULL COMMENT 'user id', `type` tinyint(4) DEFAULT NULL COMMENT 'resource type,0:FILE,1:UDF', `size` bigint(20) DEFAULT NULL COMMENT 'resource size', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `pid` int(11) DEFAULT NULL, `full_name` varchar(64) DEFAULT NULL, `is_directory` tinyint(4) DEFAULT NULL, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_resources_un` (`full_name`,`type`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_resources -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_schedules -- ---------------------------- DROP TABLE IF EXISTS `t_ds_schedules`; CREATE TABLE `t_ds_schedules` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `process_definition_code` bigint(20) NOT NULL COMMENT 'process definition code', `start_time` datetime NOT NULL COMMENT 'start time', `end_time` datetime NOT NULL COMMENT 'end time', `timezone_id` varchar(40) DEFAULT NULL COMMENT 
'timezoneId', `crontab` varchar(255) NOT NULL COMMENT 'crontab description', `failure_strategy` tinyint(4) NOT NULL COMMENT 'failure strategy. 0:end,1:continue', `user_id` int(11) NOT NULL COMMENT 'user id', `release_state` tinyint(4) NOT NULL COMMENT 'release state. 0:offline,1:online ', `warning_type` tinyint(4) NOT NULL COMMENT 'Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent', `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id', `process_instance_priority` int(11) DEFAULT NULL COMMENT 'process instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT '' COMMENT 'worker group id', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_schedules -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_session -- ---------------------------- DROP TABLE IF EXISTS `t_ds_session`; CREATE TABLE `t_ds_session` ( `id` varchar(64) NOT NULL COMMENT 'key', `user_id` int(11) DEFAULT NULL COMMENT 'user id', `ip` varchar(45) DEFAULT NULL COMMENT 'ip', `last_login_time` datetime DEFAULT NULL COMMENT 'last login time', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_session -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_task_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_task_instance`; CREATE TABLE `t_ds_task_instance` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `name` varchar(255) DEFAULT NULL COMMENT 'task name', `task_type` varchar(50) NOT NULL COMMENT 'task type', `task_code` bigint(20) NOT NULL COMMENT 'task definition code', `task_definition_version` int(11) DEFAULT NULL COMMENT 'task definition version', `process_instance_id` int(11) DEFAULT NULL COMMENT 'process instance id', `state` tinyint(4) DEFAULT NULL COMMENT 'Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete', `submit_time` datetime DEFAULT NULL COMMENT 'task submit time', `start_time` datetime DEFAULT NULL COMMENT 'task start time', `end_time` datetime DEFAULT NULL COMMENT 'task end time', `host` varchar(135) DEFAULT NULL COMMENT 'host of task running on', `execute_path` varchar(200) DEFAULT NULL COMMENT 'task execute path in the host', `log_path` varchar(200) DEFAULT NULL COMMENT 'task log path', `alert_flag` tinyint(4) DEFAULT NULL COMMENT 'whether alert', `retry_times` int(4) DEFAULT '0' COMMENT 'task retry times', `pid` int(4) DEFAULT NULL COMMENT 'pid of task', `app_link` text COMMENT 'yarn app id', `task_params` text COMMENT 'job custom parameters', `flag` tinyint(4) DEFAULT '1' COMMENT '0 not available, 1 available', `retry_interval` int(4) DEFAULT NULL COMMENT 'retry interval when task failed ', `max_retry_times` int(2) DEFAULT NULL COMMENT 'max retry times', `task_instance_priority` int(11) DEFAULT NULL COMMENT 'task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest', `worker_group` varchar(64) DEFAULT NULL COMMENT 'worker group id', `environment_code` bigint(20) DEFAULT '-1' COMMENT 'environment code', 
`environment_config` text COMMENT 'this config contains many environment variables config', `executor_id` int(11) DEFAULT NULL, `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time', `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time', `var_pool` longtext COMMENT 'var_pool', PRIMARY KEY (`id`), KEY `process_instance_id` (`process_instance_id`) USING BTREE, CONSTRAINT `foreign_key_instance_id` FOREIGN KEY (`process_instance_id`) REFERENCES `t_ds_process_instance` (`id`) ON DELETE CASCADE ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_task_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_tenant -- ---------------------------- DROP TABLE IF EXISTS `t_ds_tenant`; CREATE TABLE `t_ds_tenant` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `tenant_code` varchar(64) DEFAULT NULL COMMENT 'tenant code', `description` varchar(255) DEFAULT NULL, `queue_id` int(11) DEFAULT NULL COMMENT 'queue id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_tenant -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_udfs -- ---------------------------- DROP TABLE IF EXISTS `t_ds_udfs`; CREATE TABLE `t_ds_udfs` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `user_id` int(11) NOT NULL COMMENT 'user id', `func_name` varchar(100) NOT NULL COMMENT 'UDF function name', `class_name` varchar(255) NOT NULL COMMENT 'class of udf', `type` tinyint(4) NOT NULL COMMENT 'Udf function type', `arg_types` varchar(255) DEFAULT NULL COMMENT 'arguments types', `database` varchar(255) DEFAULT NULL COMMENT 'data base', `description` varchar(255) DEFAULT NULL, `resource_id` int(11) NOT NULL COMMENT 'resource id', `resource_name` varchar(255) NOT NULL COMMENT 'resource name', `create_time` datetime NOT NULL COMMENT 'create time', `update_time` datetime NOT NULL COMMENT 'update time', PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_udfs -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_user -- ---------------------------- DROP TABLE IF EXISTS `t_ds_user`; CREATE TABLE `t_ds_user` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'user id', `user_name` varchar(64) DEFAULT NULL COMMENT 'user name', `user_password` varchar(64) DEFAULT NULL COMMENT 'user password', `user_type` tinyint(4) DEFAULT NULL COMMENT 'user type, 0:administrator,1:ordinary user', `email` varchar(64) DEFAULT NULL COMMENT 'email', `phone` varchar(11) DEFAULT NULL COMMENT 'phone', `tenant_id` int(11) DEFAULT NULL COMMENT 'tenant id', `create_time` datetime DEFAULT NULL COMMENT 'create time', `update_time` datetime DEFAULT NULL COMMENT 'update time', `queue` varchar(64) DEFAULT NULL COMMENT 'queue', `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable', PRIMARY KEY (`id`), UNIQUE KEY `user_name_unique` (`user_name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_worker_group -- ---------------------------- DROP TABLE IF EXISTS `t_ds_worker_group`; CREATE TABLE `t_ds_worker_group` ( `id` bigint(11) NOT
NULL AUTO_INCREMENT COMMENT 'id', `name` varchar(255) NOT NULL COMMENT 'worker group name', `addr_list` text NULL DEFAULT NULL COMMENT 'worker addr list. split by [,]', `create_time` datetime NULL DEFAULT NULL COMMENT 'create time', `update_time` datetime NULL DEFAULT NULL COMMENT 'update time', PRIMARY KEY (`id`), UNIQUE KEY `name_unique` (`name`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Records of t_ds_worker_group -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_version -- ---------------------------- DROP TABLE IF EXISTS `t_ds_version`; CREATE TABLE `t_ds_version` ( `id` int(11) NOT NULL AUTO_INCREMENT, `version` varchar(200) NOT NULL, PRIMARY KEY (`id`), UNIQUE KEY `version_UNIQUE` (`version`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8 COMMENT='version'; -- ---------------------------- -- Records of t_ds_version -- ---------------------------- INSERT INTO `t_ds_version` VALUES ('1', '1.4.0'); -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- INSERT INTO `t_ds_alertgroup`(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ("1,2", 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- ---------------------------- -- Records of t_ds_user -- ---------------------------- INSERT INTO `t_ds_user` VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1); -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY','')); DROP TABLE IF EXISTS `t_ds_plugin_define`; CREATE TABLE `t_ds_plugin_define` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email', `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin', `plugin_params` text COMMENT 'plugin params', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`; CREATE TABLE `t_ds_alert_plugin_instance` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_define_id` int NOT NULL, `plugin_instance_params` text COMMENT 'plugin instance params. 
Also contain the params value which user input in web ui.', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_environment -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment`; CREATE TABLE `t_ds_environment` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `code` bigint(20) DEFAULT NULL COMMENT 'encoding', `name` varchar(100) NOT NULL COMMENT 'environment name', `config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config', `description` text NULL DEFAULT NULL COMMENT 'the details', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_name_unique` (`name`), UNIQUE KEY `environment_code_unique` (`code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_environment_worker_group_relation -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment_worker_group_relation`; CREATE TABLE `t_ds_environment_worker_group_relation` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `worker_group` varchar(255) NOT NULL COMMENT 'worker group id', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;
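As a quick sanity check after loading this schema script, a short JDBC snippet can read back the `t_ds_version` row inserted earlier in the dump. This is an illustrative sketch only; the JDBC URL, user, and password below are placeholders, not values taken from the dump.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class SchemaVersionCheck {
    public static void main(String[] args) throws Exception {
        // Placeholder connection settings; adjust to the target database.
        String url = "jdbc:mysql://localhost:3306/dolphinscheduler";
        try (Connection conn = DriverManager.getConnection(url, "root", "root");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT version FROM t_ds_version")) {
            while (rs.next()) {
                // Expect the single row inserted by the dump, e.g. "1.4.0"
                System.out.println("schema version: " + rs.getString("version"));
            }
        }
    }
}
```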
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,147
[Bug] The genTaskCodeList interface returns the same code
Multiple requests to the genTaskCodeList interface within a short period of time may respond with the same code ![image](https://user-images.githubusercontent.com/87303815/132617237-b53dd19d-603d-4ffe-9364-e69357175f6f.png) Calling the `/dolphinscheduler/projects/{projectCode}/task-definition/gen-task-codes` endpoint multiple times within a short window (≈1s) may return the same code.
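A minimal, self-contained sketch of the suspected failure mode (the `NaiveSnowflake` class below is hypothetical and differs from the project's actual `SnowFlakeUtils`): if the generator's timestamp/sequence update is not serialized, two concurrent callers can observe the same (millisecond, sequence) pair and receive identical codes. With `synchronized` in place the demo prints only unique codes; removing it typically reproduces the duplicates described above.

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

// Hypothetical generator for illustration; the real SnowFlakeUtils differs.
class NaiveSnowflake {
    private long lastMillis = -1L;
    private long sequence = 0L;

    // Drop 'synchronized' and concurrent callers may read the same
    // (lastMillis, sequence) pair, producing duplicate codes.
    synchronized long nextId() {
        long now = System.currentTimeMillis();
        if (now == lastMillis) {
            sequence = (sequence + 1) & 0xFFF; // 12-bit per-millisecond sequence
            if (sequence == 0) {               // sequence exhausted: wait for next ms
                while ((now = System.currentTimeMillis()) == lastMillis) { }
                lastMillis = now;
            }
        } else {
            sequence = 0L;
            lastMillis = now;
        }
        return (now << 12) | sequence;
    }
}

public class DuplicateCodeDemo {
    public static void main(String[] args) throws InterruptedException {
        NaiveSnowflake generator = new NaiveSnowflake();
        Set<Long> seen = ConcurrentHashMap.newKeySet();
        CountDownLatch done = new CountDownLatch(8);
        for (int t = 0; t < 8; t++) {
            new Thread(() -> {
                for (int i = 0; i < 10_000; i++) {
                    if (!seen.add(generator.nextId())) {
                        System.out.println("duplicate code generated");
                    }
                }
                done.countDown();
            }).start();
        }
        done.await();
        System.out.println("unique codes: " + seen.size());
    }
}
```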
https://github.com/apache/dolphinscheduler/issues/6147
https://github.com/apache/dolphinscheduler/pull/6150
cca48d0a9249917f7dc2ecef1cad67e93e227555
e8ddc9103dbc6d0e69b5a1e10c43606ba84f7009
"2021-09-09T03:31:10Z"
java
"2021-09-09T14:01:22Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import org.apache.dolphinscheduler.api.dto.DagDataSchedule; import org.apache.dolphinscheduler.api.dto.treeview.Instance; import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.ProcessDefinitionService; import org.apache.dolphinscheduler.api.service.ProcessInstanceService; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.SchedulerService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.FileUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.ProcessData; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import 
org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.permission.PermissionCheck; import org.apache.dolphinscheduler.service.process.ProcessService; import java.io.BufferedOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.collect.Lists; /** * process definition service impl */ @Service public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService { private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class); private static final String RELEASESTATE = "releaseState"; @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionLogMapper processDefinitionLogMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProcessInstanceService processInstanceService; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private ProcessService processService; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private SchedulerService schedulerService; @Autowired private TenantMapper tenantMapper; /** * create process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return create result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> createProcessDefinition(User loginUser, long projectCode, String name, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = 
projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); createTaskDefinition(result, loginUser, projectCode, taskDefinitionLogs, taskDefinitionJson); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } long processDefinitionCode; try { processDefinitionCode = SnowFlakeUtils.getInstance().nextId(); } catch (SnowFlakeException e) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); return result; } ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description, globalParams, locations, timeout, loginUser.getId(), tenantId); return createProcessDefine(loginUser, result, taskRelationList, processDefinition, taskDefinitionLogs); } private void createTaskDefinition(Map<String, Object> result, User loginUser, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs, String taskDefinitionJson) { if (taskDefinitionLogs.isEmpty()) { logger.error("taskDefinitionJson invalid: {}", taskDefinitionJson); putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson); return; } for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) { logger.error("task definition {} parameter invalid", taskDefinitionLog.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName()); return; } } if (processService.saveTaskDefine(loginUser, projectCode, taskDefinitionLogs)) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); } } private Map<String, Object> createProcessDefine(User loginUser, Map<String, Object> result, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, List<TaskDefinitionLog> taskDefinitionLogs) { int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); if (insertVersion > 0) { int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); } } else { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); } return result; } private Map<String, Object> checkTaskRelationList(List<ProcessTaskRelationLog> taskRelationList, 
String taskRelationJson, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); try { if (taskRelationList == null || taskRelationList.isEmpty()) { logger.error("task relation list is null"); putMsg(result, Status.DATA_IS_NOT_VALID, taskRelationJson); return result; } List<TaskNode> taskNodeList = processService.transformTask(taskRelationList, taskDefinitionLogs); if (taskNodeList.size() != taskRelationList.size()) { Set<Long> postTaskCodes = taskRelationList.stream().map(ProcessTaskRelationLog::getPostTaskCode).collect(Collectors.toSet()); Set<Long> taskNodeCodes = taskNodeList.stream().map(TaskNode::getCode).collect(Collectors.toSet()); Collection<Long> codes = CollectionUtils.subtract(postTaskCodes, taskNodeCodes); logger.error("the task code is not exit"); putMsg(result, Status.TASK_DEFINE_NOT_EXIST, StringUtils.join(codes, Constants.COMMA)); return result; } if (graphHasCycle(taskNodeList)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the task relation json is normal for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { if (processTaskRelationLog.getPostTaskCode() == 0) { logger.error("the post_task_code or post_task_version can't be zero"); putMsg(result, Status.CHECK_PROCESS_TASK_RELATION_ERROR); return result; } } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * query process definition list * * @param loginUser login user * @param projectCode project code * @return definition list */ @Override public Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = resourceList.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * query process definition list paging * * @param loginUser login user * @param projectCode project code * @param searchVal search value * @param userId user id * @param pageNo page number * @param pageSize page size * @return process definition page */ @Override public Result queryProcessDefinitionListPaging(User loginUser, long projectCode, String searchVal, Integer userId, Integer pageNo, Integer pageSize) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } Page<ProcessDefinition> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging( page, searchVal, userId, project.getCode(), isAdmin(loginUser)); List<ProcessDefinition> records = processDefinitionIPage.getRecords(); for (ProcessDefinition pd : records) { ProcessDefinitionLog processDefinitionLog = 
processDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion()); User user = userMapper.selectById(processDefinitionLog.getOperator()); pd.setModifyBy(user.getUserName()); } processDefinitionIPage.setRecords(records); PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) processDefinitionIPage.getTotal()); pageInfo.setTotalList(processDefinitionIPage.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * query detail of process definition * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return process definition detail */ @Override public Map<String, Object> queryProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { Tenant tenant = tenantMapper.queryById(processDefinition.getTenantId()); if (tenant != null) { processDefinition.setTenantCode(tenant.getTenantCode()); } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } @Override public Map<String, Object> queryProcessDefinitionByName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, name); } else { DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } /** * update process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param code process definition code * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return update result code */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> updateProcessDefinition(User loginUser, long projectCode, String name, long code, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); createTaskDefinition(result, loginUser, projectCode, taskDefinitionLogs, taskDefinitionJson); if 
(result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); // check process definition exists if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { // online can not permit edit putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName()); return result; } if (!name.equals(processDefinition.getName())) { // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } } ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class); processDefinition.set(projectCode, name, description, globalParams, locations, timeout, tenantId); return updateProcessDefine(loginUser, result, taskRelationList, processDefinition, processDefinitionDeepCopy, taskDefinitionLogs); } private Map<String, Object> updateProcessDefine(User loginUser, Map<String, Object> result, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, ProcessDefinition processDefinitionDeepCopy, List<TaskDefinitionLog> taskDefinitionLogs) { int insertVersion; if (processDefinition.equals(processDefinitionDeepCopy)) { insertVersion = processDefinitionDeepCopy.getVersion(); } else { processDefinition.setUpdateTime(new Date()); insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); } if (insertVersion > 0) { int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); } } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); } return result; } /** * verify process definition name unique * * @param loginUser login user * @param projectCode project code * @param name name * @return true if process definition name not exists, otherwise false */ @Override public Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name.trim()); if (processDefinition == null) { putMsg(result, Status.SUCCESS); } else { putMsg(result, 
Status.PROCESS_DEFINITION_NAME_EXIST, name.trim()); } return result; } /** * delete process definition by code * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return delete result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } // Determine if the login user is the owner of the process definition if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } // check process definition is already online if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, code); return result; } // check process instances is already running List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES); if (CollectionUtils.isNotEmpty(processInstances)) { putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_CODE_FAIL, processInstances.size()); return result; } // get the timing according to the process definition List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(code); if (!schedules.isEmpty() && schedules.size() > 1) { logger.warn("scheduler num is {},Greater than 1", schedules.size()); putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); return result; } else if (schedules.size() == 1) { Schedule schedule = schedules.get(0); if (schedule.getReleaseState() == ReleaseState.OFFLINE) { scheduleMapper.deleteById(schedule.getId()); } else if (schedule.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId()); return result; } } int delete = processDefinitionMapper.deleteById(processDefinition.getId()); processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode()); if (delete > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); } return result; } /** * release process definition: online / offline * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param releaseState release state * @return release result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check state if (null == releaseState) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); switch (releaseState) { case ONLINE: // To check resources 
whether they are already cancel authorized or deleted String resourceIds = processDefinition.getResourceIds(); if (StringUtils.isNotBlank(resourceIds)) { Integer[] resourceIdArray = Arrays.stream(resourceIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new); PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger); try { permissionCheck.checkPermission(); } catch (Exception e) { logger.error(e.getMessage(), e); putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE); return result; } } processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); break; case OFFLINE: processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray( new long[]{processDefinition.getCode()} ); for (Schedule schedule : scheduleList) { logger.info("set schedule offline, project id: {}, schedule id: {}, process definition code: {}", project.getId(), schedule.getId(), code); // set status schedule.setReleaseState(ReleaseState.OFFLINE); scheduleMapper.updateById(schedule); schedulerService.deleteSchedule(project.getId(), schedule.getId()); } break; default: putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } putMsg(result, Status.SUCCESS); return result; } /** * batch export process definition by codes */ @Override public void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response) { if (StringUtils.isEmpty(codes)) { return; } Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); List<DagDataSchedule> dagDataSchedules = processDefinitionList.stream().map(this::exportProcessDagData).collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(dagDataSchedules)) { downloadProcessDefinitionFile(response, dagDataSchedules); } } /** * download the process definition file */ private void downloadProcessDefinitionFile(HttpServletResponse response, List<DagDataSchedule> dagDataSchedules) { response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE); BufferedOutputStream buff = null; ServletOutputStream out = null; try { out = response.getOutputStream(); buff = new BufferedOutputStream(out); buff.write(JSONUtils.toJsonString(dagDataSchedules).getBytes(StandardCharsets.UTF_8)); buff.flush(); buff.close(); } catch (IOException e) { logger.warn("export process fail", e); } finally { if (null != buff) { try { buff.close(); } catch (Exception e) { logger.warn("export process buffer not close", e); } } if (null != out) { try { out.close(); } catch (Exception e) { logger.warn("export process output stream not close", e); } } } } /** * get export process dag data * * @param processDefinition process definition * @return DagDataSchedule */ public DagDataSchedule exportProcessDagData(ProcessDefinition processDefinition) { List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(processDefinition.getCode()); 
DagDataSchedule dagDataSchedule = new DagDataSchedule(processService.genDagData(processDefinition)); if (!schedules.isEmpty()) { Schedule schedule = schedules.get(0); schedule.setReleaseState(ReleaseState.OFFLINE); dagDataSchedule.setSchedule(schedule); } return dagDataSchedule; } /** * import process definition * * @param loginUser login user * @param projectCode project code * @param file process metadata json file * @return import process */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> importProcessDefinition(User loginUser, long projectCode, MultipartFile file) { Map<String, Object> result = new HashMap<>(); String dagDataScheduleJson = FileUtils.file2String(file); List<DagDataSchedule> dagDataScheduleList = JSONUtils.toList(dagDataScheduleJson, DagDataSchedule.class); //check file content if (CollectionUtils.isEmpty(dagDataScheduleList)) { putMsg(result, Status.DATA_IS_NULL, "fileContent"); return result; } for (DagDataSchedule dagDataSchedule : dagDataScheduleList) { if (!checkAndImport(loginUser, projectCode, result, dagDataSchedule)) { return result; } } return result; } /** * check and import */ private boolean checkAndImport(User loginUser, long projectCode, Map<String, Object> result, DagDataSchedule dagDataSchedule) { if (!checkImportanceParams(dagDataSchedule, result)) { return false; } ProcessDefinition processDefinition = dagDataSchedule.getProcessDefinition(); //unique check Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, projectCode, processDefinition.getName()); if (Status.SUCCESS.equals(checkResult.get(Constants.STATUS))) { putMsg(result, Status.SUCCESS); } else { result.putAll(checkResult); return false; } String processDefinitionName = recursionProcessDefinitionName(projectCode, processDefinition.getName(), 1); processDefinition.setName(processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp()); processDefinition.setUserId(loginUser.getId()); try { processDefinition.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); return false; } List<TaskDefinitionLog> taskDefinitionList = dagDataSchedule.getTaskDefinitionList(); Map<Long, Long> taskCodeMap = new HashMap<>(); Date now = new Date(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionList) { taskDefinitionLog.setName(taskDefinitionLog.getName() + "_import_" + DateUtils.getCurrentTimeStamp()); taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUserId(loginUser.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperator(loginUser.getId()); taskDefinitionLog.setOperateTime(now); try { long code = SnowFlakeUtils.getInstance().nextId(); taskCodeMap.put(taskDefinitionLog.getCode(), code); taskDefinitionLog.setCode(code); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code"); return false; } } int insert = taskDefinitionMapper.batchInsert(taskDefinitionList); int logInsert = taskDefinitionLogMapper.batchInsert(taskDefinitionList); if ((logInsert & insert) == 0) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); return false; } List<ProcessTaskRelationLog> taskRelationList = dagDataSchedule.getProcessTaskRelationList(); taskRelationList.forEach(processTaskRelationLog -> { 
processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode())); processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode())); processTaskRelationLog.setPreTaskVersion(Constants.VERSION_FIRST); processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST); }); Map<String, Object> createProcessResult = createProcessDefine(loginUser, result, taskRelationList, processDefinition, null); if (Status.SUCCESS.equals(createProcessResult.get(Constants.STATUS))) { putMsg(createProcessResult, Status.SUCCESS); } else { result.putAll(createProcessResult); return false; } Schedule schedule = dagDataSchedule.getSchedule(); if (null != schedule) { ProcessDefinition newProcessDefinition = processDefinitionMapper.queryByCode(processDefinition.getCode()); schedule.setProcessDefinitionCode(newProcessDefinition.getCode()); schedule.setUserId(loginUser.getId()); schedule.setCreateTime(now); schedule.setUpdateTime(now); int scheduleInsert = scheduleMapper.insert(schedule); if (0 == scheduleInsert) { putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR); return false; } } return true; } /** * check importance params */ private boolean checkImportanceParams(DagDataSchedule dagDataSchedule, Map<String, Object> result) { if (dagDataSchedule.getProcessDefinition() == null) { putMsg(result, Status.DATA_IS_NULL, "ProcessDefinition"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getTaskDefinitionList())) { putMsg(result, Status.DATA_IS_NULL, "TaskDefinitionList"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getProcessTaskRelationList())) { putMsg(result, Status.DATA_IS_NULL, "ProcessTaskRelationList"); return false; } return true; } private String recursionProcessDefinitionName(long projectCode, String processDefinitionName, int num) { ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName); if (processDefinition != null) { if (num > 1) { String str = processDefinitionName.substring(0, processDefinitionName.length() - 3); processDefinitionName = str + "(" + num + ")"; } else { processDefinitionName = processDefinition.getName() + "(" + num + ")"; } } else { return processDefinitionName; } return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1); } /** * check the process task relation json * * @param processTaskRelationJson process task relation json * @return check result code */ @Override public Map<String, Object> checkProcessNodeList(String processTaskRelationJson) { Map<String, Object> result = new HashMap<>(); try { if (processTaskRelationJson == null) { logger.error("process data is null"); putMsg(result, Status.DATA_IS_NOT_VALID, processTaskRelationJson); return result; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(processTaskRelationJson, ProcessTaskRelationLog.class); // Check whether the task node is normal List<TaskNode> taskNodes = processService.transformTask(taskRelationList, Lists.newArrayList()); if (CollectionUtils.isEmpty(taskNodes)) { logger.error("process node info is empty"); putMsg(result, Status.PROCESS_DAG_IS_EMPTY); return result; } // check has cycle if (graphHasCycle(taskNodes)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the process definition json is normal for (TaskNode taskNode : taskNodes) { if (!CheckUtils.checkTaskNodeParameters(taskNode)) { logger.error("task node {} parameter invalid", 
taskNode.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName()); return result; } // check extra params CheckUtils.checkOtherParams(taskNode.getExtras()); } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * get task node details based on process definition * * @param loginUser loginUser * @param projectCode project code * @param code process definition code * @return task node list */ @Override public Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData.getTaskDefinitionList()); putMsg(result, Status.SUCCESS); return result; } /** * get task node details map based on process definition * * @param loginUser loginUser * @param projectCode project code * @param codes define codes * @return task node list */ @Override public Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode, String codes) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); if (CollectionUtils.isEmpty(processDefinitionList)) { logger.info("process definition not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes); return result; } Map<Long, List<TaskDefinitionLog>> taskNodeMap = new HashMap<>(); for (ProcessDefinition processDefinition : processDefinitionList) { DagData dagData = processService.genDagData(processDefinition); taskNodeMap.put(processDefinition.getCode(), dagData.getTaskDefinitionList()); } result.put(Constants.DATA_LIST, taskNodeMap); putMsg(result, Status.SUCCESS); return result; } /** * query process definition all by project code * * @param loginUser loginUser * @param projectCode project code * @return process definitions in the project */ @Override public Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = processDefinitions.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * 
Encapsulates the TreeView structure * * @param code process definition code * @param limit limit * @return tree view json data */ @Override public Map<String, Object> viewTree(long code, Integer limit) { Map<String, Object> result = new HashMap<>(); ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (null == processDefinition) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition); // nodes that is running Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>(); //nodes that is waiting to run Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>(); // List of process instances List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(code, limit); processInstanceList.forEach(processInstance -> processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()))); List<TaskDefinitionLog> taskDefinitionList = processService.queryTaskDefinitionListByProcess(code, processDefinition.getVersion()); Map<Long, TaskDefinitionLog> taskDefinitionMap = taskDefinitionList.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); if (limit > processInstanceList.size()) { limit = processInstanceList.size(); } TreeViewDto parentTreeViewDto = new TreeViewDto(); parentTreeViewDto.setName("DAG"); parentTreeViewDto.setType(""); // Specify the process definition, because it is a TreeView for a process definition for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); Date endTime = processInstance.getEndTime() == null ? new Date() : processInstance.getEndTime(); parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), "", processInstance.getState().toString(), processInstance.getStartTime(), endTime, processInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime()))); } List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>(); parentTreeViewDtoList.add(parentTreeViewDto); // Here is the encapsulation task instance for (String startNode : dag.getBeginNode()) { runningNodeMap.put(startNode, parentTreeViewDtoList); } while (Stopper.isRunning()) { Set<String> postNodeList; Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator(); while (iter.hasNext()) { Map.Entry<String, List<TreeViewDto>> en = iter.next(); String nodeName = en.getKey(); parentTreeViewDtoList = en.getValue(); TreeViewDto treeViewDto = new TreeViewDto(); treeViewDto.setName(nodeName); TaskNode taskNode = dag.getNode(nodeName); treeViewDto.setType(taskNode.getType()); //set treeViewDto instances for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), nodeName); if (taskInstance == null) { treeViewDto.getInstances().add(new Instance(-1, "not running", "null")); } else { Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime(); Date endTime = taskInstance.getEndTime() == null ? 
new Date() : taskInstance.getEndTime(); int subProcessId = 0; // if process is sub process, the return sub id, or sub id=0 if (taskInstance.isSubProcess()) { TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode()); subProcessId = Integer.parseInt(JSONUtils.parseObject( taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_ID).asText()); } treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskType(), taskInstance.getState().toString(), taskInstance.getStartTime(), taskInstance.getEndTime(), taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId)); } } for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) { pTreeViewDto.getChildren().add(treeViewDto); } postNodeList = dag.getSubsequentNodes(nodeName); if (CollectionUtils.isNotEmpty(postNodeList)) { for (String nextNodeName : postNodeList) { List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName); if (CollectionUtils.isEmpty(treeViewDtoList)) { treeViewDtoList = new ArrayList<>(); } treeViewDtoList.add(treeViewDto); waitingRunningNodeMap.put(nextNodeName, treeViewDtoList); } } runningNodeMap.remove(nodeName); } if (waitingRunningNodeMap.size() == 0) { break; } else { runningNodeMap.putAll(waitingRunningNodeMap); waitingRunningNodeMap.clear(); } } result.put(Constants.DATA_LIST, parentTreeViewDto); result.put(Constants.STATUS, Status.SUCCESS); result.put(Constants.MSG, Status.SUCCESS.getMsg()); return result; } /** * whether the graph has a ring * * @param taskNodeResponseList task node response list * @return if graph has cycle flag */ private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) { DAG<String, TaskNode, String> graph = new DAG<>(); // Fill the vertices for (TaskNode taskNodeResponse : taskNodeResponseList) { graph.addNode(taskNodeResponse.getName(), taskNodeResponse); } // Fill edge relations for (TaskNode taskNodeResponse : taskNodeResponseList) { List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class); if (CollectionUtils.isNotEmpty(preTasks)) { for (String preTask : preTasks) { if (!graph.addEdge(preTask, taskNodeResponse.getName())) { return true; } } } } return graph.hasCycle(); } /** * batch copy process definition * * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override public Map<String, Object> batchCopyProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, true); checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, true); return result; } /** * batch move process definition * * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override public Map<String, Object> batchMoveProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (projectCode == 
targetProjectCode) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, false); checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, false); return result; } private Map<String, Object> checkParams(User loginUser, long projectCode, String processDefinitionCodes, long targetProjectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (StringUtils.isEmpty(processDefinitionCodes)) { putMsg(result, Status.PROCESS_DEFINITION_CODES_IS_EMPTY, processDefinitionCodes); return result; } if (projectCode != targetProjectCode) { Project targetProject = projectMapper.queryByCode(targetProjectCode); //check user access for project Map<String, Object> targetResult = projectService.checkProjectAndAuth(loginUser, targetProject, targetProjectCode); if (targetResult.get(Constants.STATUS) != Status.SUCCESS) { return targetResult; } } return result; } private void doBatchOperateProcessDefinition(User loginUser, long targetProjectCode, List<String> failedProcessList, String processDefinitionCodes, Map<String, Object> result, boolean isCopy) { Set<Long> definitionCodes = Arrays.stream(processDefinitionCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(definitionCodes); Set<Long> queryCodes = processDefinitionList.stream().map(ProcessDefinition::getCode).collect(Collectors.toSet()); // definitionCodes - queryCodes Set<Long> diffCode = definitionCodes.stream().filter(code -> !queryCodes.contains(code)).collect(Collectors.toSet()); diffCode.forEach(code -> failedProcessList.add(code + "[null]")); for (ProcessDefinition processDefinition : processDefinitionList) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<ProcessTaskRelationLog> taskRelationList = processTaskRelations.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList()); processDefinition.setProjectCode(targetProjectCode); if (isCopy) { processDefinition.setName(processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp()); createProcessDefine(loginUser, result, taskRelationList, processDefinition, Lists.newArrayList()); } else { updateProcessDefine(loginUser, result, taskRelationList, processDefinition, null, Lists.newArrayList()); } if (result.get(Constants.STATUS) != Status.SUCCESS) { failedProcessList.add(processDefinition.getCode() + "[" + processDefinition.getName() + "]"); } } } /** * switch the defined process definition version * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param version the version user want to switch * @return switch process definition version result code */ @Override public Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition 
processDefinition = processDefinitionMapper.queryByCode(code); if (Objects.isNull(processDefinition)) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR, code); return result; } ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper .queryByDefinitionCodeAndVersion(code, version); if (Objects.isNull(processDefinitionLog)) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR, processDefinition.getCode(), version); return result; } int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog); if (switchVersion > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR); } return result; } /** * check batch operate result * * @param srcProjectCode srcProjectCode * @param targetProjectCode targetProjectCode * @param result result * @param failedProcessList failedProcessList * @param isCopy isCopy */ private void checkBatchOperateResult(long srcProjectCode, long targetProjectCode, Map<String, Object> result, List<String> failedProcessList, boolean isCopy) { if (!failedProcessList.isEmpty()) { if (isCopy) { putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } else { putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } } else { putMsg(result, Status.SUCCESS); } } /** * query the pagination versions info by one certain process definition code * * @param loginUser login user info to check auth * @param projectCode project code * @param pageNo page number * @param pageSize page size * @param code process definition code * @return the pagination process definition versions info of the certain process definition */ @Override public Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); // check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize); Page<ProcessDefinitionLog> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, code); List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords(); pageInfo.setTotalList(processDefinitionLogs); pageInfo.setTotal((int) processDefinitionVersionsPaging.getTotal()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * delete one certain process definition by version number and process definition code * * @param loginUser login user info to check auth * @param projectCode project code * @param code process definition code * @param version version number * @return delele result code */ @Override public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if 
(result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version); putMsg(result, Status.SUCCESS); } return result; } }
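The `graphHasCycle` helper in the file above leans on a property of the project's `DAG` class that the excerpt itself suggests: `addEdge` returns `false` for an edge it refuses (for example one that would close a cycle), which is why a failed `addEdge` is treated as "has cycle". A hedged usage sketch of that pattern — assuming `TaskNode` has a usable no-arg constructor, which the excerpt does not show:

```java
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;

public class CycleCheckSketch {
    public static void main(String[] args) {
        DAG<String, TaskNode, String> graph = new DAG<>();
        graph.addNode("A", new TaskNode()); // assumed no-arg constructor
        graph.addNode("B", new TaskNode());
        boolean forward = graph.addEdge("A", "B");  // accepted
        boolean backward = graph.addEdge("B", "A"); // would close A -> B -> A
        System.out.println(forward + " / " + backward); // expected: true / false
    }
}
```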
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6147
[Bug] The genTaskCodeList interface returns the same code
Multiple requests to the genTaskCodeList interface within a short period of time may respond with the same code ![image](https://user-images.githubusercontent.com/87303815/132617237-b53dd19d-603d-4ffe-9364-e69357175f6f.png) Within a short period (≈1s), multiple requests to the `/dolphinscheduler/projects/{projectCode}/task-definition/gen-task-codes` endpoint may return the same code.
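One plausible reading, based on the `SnowFlakeUtils` source stored later in this row's `file_content`: the generator uses second-granularity timestamps and only 2 machine-id bits, while `machineId` is taken modulo 32, so two JVMs (for example two API server instances) that land on the same `machineId` and call `nextId()` in the same second both start at `sequence = 0` and compute the identical value. A hypothetical sketch of that arithmetic — constant values copied from the file, the two-node scenario is an assumption the issue does not confirm:

```java
public class DuplicateIdSketch {
    public static void main(String[] args) {
        final long START_TIMESTAMP = 1609430400L; // same constant as SnowFlakeUtils
        final long MACHINE_LEFT = 13;             // SEQUENCE_BIT
        final long TIMESTAMP_LEFT = 15;           // SEQUENCE_BIT + MACHINE_BIT

        long second = System.currentTimeMillis() / 1000; // second granularity, as in nowTimestamp()
        long machineId = 7;  // assumed: two hosts whose hostname hashes agree mod 32
        long sequence = 0;   // each JVM starts its own sequence at 0

        long idFromNodeA = (second - START_TIMESTAMP) << TIMESTAMP_LEFT | machineId << MACHINE_LEFT | sequence;
        long idFromNodeB = (second - START_TIMESTAMP) << TIMESTAMP_LEFT | machineId << MACHINE_LEFT | sequence;
        System.out.println(idFromNodeA == idFromNodeB); // true: two "unique" codes collide
    }
}
```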
https://github.com/apache/dolphinscheduler/issues/6147
https://github.com/apache/dolphinscheduler/pull/6150
cca48d0a9249917f7dc2ecef1cad67e93e227555
e8ddc9103dbc6d0e69b5a1e10c43606ba84f7009
"2021-09-09T03:31:10Z"
java
"2021-09-09T14:01:22Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/SnowFlakeUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.common.utils;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Objects;

public class SnowFlakeUtils {
    // start timestamp
    private static final long START_TIMESTAMP = 1609430400L; //2021-01-01
    // Number of digits
    private static final long SEQUENCE_BIT = 13;
    private static final long MACHINE_BIT = 2;
    private static final long MAX_SEQUENCE = ~(-1L << SEQUENCE_BIT);
    // The displacement to the left
    private static final long MACHINE_LEFT = SEQUENCE_BIT;
    private static final long TIMESTAMP_LEFT = SEQUENCE_BIT + MACHINE_BIT;
    private final int machineId;
    private long sequence = 0L;
    private long lastTimestamp = -1L;

    private SnowFlakeUtils() throws SnowFlakeException {
        try {
            this.machineId = Math.abs(Objects.hash(InetAddress.getLocalHost().getHostName())) % 32;
        } catch (UnknownHostException e) {
            throw new SnowFlakeException(e.getMessage());
        }
    }

    private static SnowFlakeUtils instance = null;

    public static synchronized SnowFlakeUtils getInstance() throws SnowFlakeException {
        if (instance == null) {
            instance = new SnowFlakeUtils();
        }
        return instance;
    }

    public synchronized long nextId() throws SnowFlakeException {
        long currStmp = nowTimestamp();
        if (currStmp < lastTimestamp) {
            throw new SnowFlakeException("Clock moved backwards. Refusing to generate id");
        }
        if (currStmp == lastTimestamp) {
            sequence = (sequence + 1) & MAX_SEQUENCE;
            if (sequence == 0L) {
                currStmp = getNextMill();
            }
        } else {
            sequence = 0L;
        }
        lastTimestamp = currStmp;
        return (currStmp - START_TIMESTAMP) << TIMESTAMP_LEFT | machineId << MACHINE_LEFT | sequence;
    }

    private long getNextMill() {
        long mill = nowTimestamp();
        while (mill <= lastTimestamp) {
            mill = nowTimestamp();
        }
        return mill;
    }

    private long nowTimestamp() {
        return System.currentTimeMillis() / 1000;
    }

    public static class SnowFlakeException extends Exception {
        public SnowFlakeException(String message) {
            super(message);
        }
    }
}
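Two properties of the file above are worth noting when reading both issues in this batch; this is my analysis of the stored source, not a statement about the eventual fix. First, `nowTimestamp()` divides `System.currentTimeMillis()` by 1000, so with `SEQUENCE_BIT = 13` a single JVM can mint at most 2^13 = 8192 ids per second before `getNextMill()` busy-waits into the next second. Second, `MACHINE_BIT = 2` reserves only two bits for the machine field, yet `machineId` ranges over 0–31, so `machineId << 13` spills into the timestamp bits and distinct (timestamp, machineId) pairs can alias to the same id. A small hedged demonstration of that aliasing:

```java
public class MachineBitOverflowSketch {
    public static void main(String[] args) {
        // Layout from SnowFlakeUtils: id = timestamp << 15 | machineId << 13 | sequence.
        // machineId = 4 needs bit 15, which is already the lowest timestamp bit.
        long idA = (100L << 15) | (4L << 13) | 0L; // 100 s after the epoch, machineId 4
        long idB = (101L << 15) | (0L << 13) | 0L; // 101 s after the epoch, machineId 0
        System.out.println(idA + " == " + idB);    // both print 3309568
    }
}
```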
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6146
[Bug] Save workflow error
I created a workflow as follows: ![image](https://user-images.githubusercontent.com/87303815/132616260-4e0be953-ecfe-497a-86ed-a5c0fe8ba404.png) I got an error when I clicked the save button: ![image](https://user-images.githubusercontent.com/87303815/132616620-89a34a31-f6e4-4bab-9c5f-168c10b92907.png) **Which version of Dolphin Scheduler:** version: 2.0.0 branch: dev **Additional context** The request params: [request.txt](https://github.com/apache/dolphinscheduler/files/7133138/request.txt)
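This row shares its fix PR (#6150) and both commit SHAs with issue 6147 above, which suggests the save failure traces back to the same code generator: if two task definitions in the submitted `taskDefinitionJson` end up carrying the same generated code, persisting them through `saveTaskDefine` can no longer treat the code as a unique key. As a purely hypothetical defensive guard — not the actual fix — one could reject such a request up front:

```java
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;

public class TaskCodeGuard {
    // Hypothetical pre-save check: fail fast when the request carries duplicate task codes.
    public static void assertDistinctTaskCodes(List<TaskDefinitionLog> taskDefinitionLogs) {
        Set<Long> codes = taskDefinitionLogs.stream()
                .map(TaskDefinitionLog::getCode)
                .collect(Collectors.toSet());
        if (codes.size() != taskDefinitionLogs.size()) {
            throw new IllegalArgumentException("duplicate task definition codes in request");
        }
    }
}
```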
https://github.com/apache/dolphinscheduler/issues/6146
https://github.com/apache/dolphinscheduler/pull/6150
cca48d0a9249917f7dc2ecef1cad67e93e227555
e8ddc9103dbc6d0e69b5a1e10c43606ba84f7009
"2021-09-09T03:19:57Z"
java
"2021-09-09T14:01:22Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import org.apache.dolphinscheduler.api.dto.DagDataSchedule; import org.apache.dolphinscheduler.api.dto.treeview.Instance; import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.ProcessDefinitionService; import org.apache.dolphinscheduler.api.service.ProcessInstanceService; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.SchedulerService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.FileUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.common.utils.StringUtils; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.ProcessData; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import 
org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.permission.PermissionCheck; import org.apache.dolphinscheduler.service.process.ProcessService; import java.io.BufferedOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.collect.Lists; /** * process definition service impl */ @Service public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService { private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class); private static final String RELEASESTATE = "releaseState"; @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionLogMapper processDefinitionLogMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProcessInstanceService processInstanceService; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private ProcessService processService; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private SchedulerService schedulerService; @Autowired private TenantMapper tenantMapper; /** * create process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return create result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> createProcessDefinition(User loginUser, long projectCode, String name, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = 
projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); createTaskDefinition(result, loginUser, projectCode, taskDefinitionLogs, taskDefinitionJson); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } long processDefinitionCode; try { processDefinitionCode = SnowFlakeUtils.getInstance().nextId(); } catch (SnowFlakeException e) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); return result; } ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description, globalParams, locations, timeout, loginUser.getId(), tenantId); return createProcessDefine(loginUser, result, taskRelationList, processDefinition, taskDefinitionLogs); } private void createTaskDefinition(Map<String, Object> result, User loginUser, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs, String taskDefinitionJson) { if (taskDefinitionLogs.isEmpty()) { logger.error("taskDefinitionJson invalid: {}", taskDefinitionJson); putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson); return; } for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) { logger.error("task definition {} parameter invalid", taskDefinitionLog.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName()); return; } } if (processService.saveTaskDefine(loginUser, projectCode, taskDefinitionLogs)) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); } } private Map<String, Object> createProcessDefine(User loginUser, Map<String, Object> result, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, List<TaskDefinitionLog> taskDefinitionLogs) { int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); if (insertVersion > 0) { int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); } } else { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); } return result; } private Map<String, Object> checkTaskRelationList(List<ProcessTaskRelationLog> taskRelationList, 
String taskRelationJson, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); try { if (taskRelationList == null || taskRelationList.isEmpty()) { logger.error("task relation list is null"); putMsg(result, Status.DATA_IS_NOT_VALID, taskRelationJson); return result; } List<TaskNode> taskNodeList = processService.transformTask(taskRelationList, taskDefinitionLogs); if (taskNodeList.size() != taskRelationList.size()) { Set<Long> postTaskCodes = taskRelationList.stream().map(ProcessTaskRelationLog::getPostTaskCode).collect(Collectors.toSet()); Set<Long> taskNodeCodes = taskNodeList.stream().map(TaskNode::getCode).collect(Collectors.toSet()); Collection<Long> codes = CollectionUtils.subtract(postTaskCodes, taskNodeCodes); logger.error("the task code is not exit"); putMsg(result, Status.TASK_DEFINE_NOT_EXIST, StringUtils.join(codes, Constants.COMMA)); return result; } if (graphHasCycle(taskNodeList)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the task relation json is normal for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { if (processTaskRelationLog.getPostTaskCode() == 0) { logger.error("the post_task_code or post_task_version can't be zero"); putMsg(result, Status.CHECK_PROCESS_TASK_RELATION_ERROR); return result; } } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * query process definition list * * @param loginUser login user * @param projectCode project code * @return definition list */ @Override public Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = resourceList.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * query process definition list paging * * @param loginUser login user * @param projectCode project code * @param searchVal search value * @param userId user id * @param pageNo page number * @param pageSize page size * @return process definition page */ @Override public Result queryProcessDefinitionListPaging(User loginUser, long projectCode, String searchVal, Integer userId, Integer pageNo, Integer pageSize) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } Page<ProcessDefinition> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging( page, searchVal, userId, project.getCode(), isAdmin(loginUser)); List<ProcessDefinition> records = processDefinitionIPage.getRecords(); for (ProcessDefinition pd : records) { ProcessDefinitionLog processDefinitionLog = 
processDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion()); User user = userMapper.selectById(processDefinitionLog.getOperator()); pd.setModifyBy(user.getUserName()); } processDefinitionIPage.setRecords(records); PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) processDefinitionIPage.getTotal()); pageInfo.setTotalList(processDefinitionIPage.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * query detail of process definition * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return process definition detail */ @Override public Map<String, Object> queryProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { Tenant tenant = tenantMapper.queryById(processDefinition.getTenantId()); if (tenant != null) { processDefinition.setTenantCode(tenant.getTenantCode()); } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } @Override public Map<String, Object> queryProcessDefinitionByName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, name); } else { DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } /** * update process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param code process definition code * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return update result code */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> updateProcessDefinition(User loginUser, long projectCode, String name, long code, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); createTaskDefinition(result, loginUser, projectCode, taskDefinitionLogs, taskDefinitionJson); if 
(result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); // check process definition exists if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { // online can not permit edit putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName()); return result; } if (!name.equals(processDefinition.getName())) { // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } } ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class); processDefinition.set(projectCode, name, description, globalParams, locations, timeout, tenantId); return updateProcessDefine(loginUser, result, taskRelationList, processDefinition, processDefinitionDeepCopy, taskDefinitionLogs); } private Map<String, Object> updateProcessDefine(User loginUser, Map<String, Object> result, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, ProcessDefinition processDefinitionDeepCopy, List<TaskDefinitionLog> taskDefinitionLogs) { int insertVersion; if (processDefinition.equals(processDefinitionDeepCopy)) { insertVersion = processDefinitionDeepCopy.getVersion(); } else { processDefinition.setUpdateTime(new Date()); insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); } if (insertVersion > 0) { int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); } } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); } return result; } /** * verify process definition name unique * * @param loginUser login user * @param projectCode project code * @param name name * @return true if process definition name not exists, otherwise false */ @Override public Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name.trim()); if (processDefinition == null) { putMsg(result, Status.SUCCESS); } else { putMsg(result, 
Status.PROCESS_DEFINITION_NAME_EXIST, name.trim()); } return result; } /** * delete process definition by code * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return delete result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } // Determine if the login user is the owner of the process definition if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } // check process definition is already online if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, code); return result; } // check process instances is already running List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES); if (CollectionUtils.isNotEmpty(processInstances)) { putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_CODE_FAIL, processInstances.size()); return result; } // get the timing according to the process definition List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(code); if (!schedules.isEmpty() && schedules.size() > 1) { logger.warn("scheduler num is {},Greater than 1", schedules.size()); putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); return result; } else if (schedules.size() == 1) { Schedule schedule = schedules.get(0); if (schedule.getReleaseState() == ReleaseState.OFFLINE) { scheduleMapper.deleteById(schedule.getId()); } else if (schedule.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId()); return result; } } int delete = processDefinitionMapper.deleteById(processDefinition.getId()); processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode()); if (delete > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); } return result; } /** * release process definition: online / offline * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param releaseState release state * @return release result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check state if (null == releaseState) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); switch (releaseState) { case ONLINE: // To check resources 
whether they are already cancel authorized or deleted String resourceIds = processDefinition.getResourceIds(); if (StringUtils.isNotBlank(resourceIds)) { Integer[] resourceIdArray = Arrays.stream(resourceIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new); PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger); try { permissionCheck.checkPermission(); } catch (Exception e) { logger.error(e.getMessage(), e); putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE); return result; } } processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); break; case OFFLINE: processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray( new long[]{processDefinition.getCode()} ); for (Schedule schedule : scheduleList) { logger.info("set schedule offline, project id: {}, schedule id: {}, process definition code: {}", project.getId(), schedule.getId(), code); // set status schedule.setReleaseState(ReleaseState.OFFLINE); scheduleMapper.updateById(schedule); schedulerService.deleteSchedule(project.getId(), schedule.getId()); } break; default: putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } putMsg(result, Status.SUCCESS); return result; } /** * batch export process definition by codes */ @Override public void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response) { if (StringUtils.isEmpty(codes)) { return; } Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); List<DagDataSchedule> dagDataSchedules = processDefinitionList.stream().map(this::exportProcessDagData).collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(dagDataSchedules)) { downloadProcessDefinitionFile(response, dagDataSchedules); } } /** * download the process definition file */ private void downloadProcessDefinitionFile(HttpServletResponse response, List<DagDataSchedule> dagDataSchedules) { response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE); BufferedOutputStream buff = null; ServletOutputStream out = null; try { out = response.getOutputStream(); buff = new BufferedOutputStream(out); buff.write(JSONUtils.toJsonString(dagDataSchedules).getBytes(StandardCharsets.UTF_8)); buff.flush(); buff.close(); } catch (IOException e) { logger.warn("export process fail", e); } finally { if (null != buff) { try { buff.close(); } catch (Exception e) { logger.warn("export process buffer not close", e); } } if (null != out) { try { out.close(); } catch (Exception e) { logger.warn("export process output stream not close", e); } } } } /** * get export process dag data * * @param processDefinition process definition * @return DagDataSchedule */ public DagDataSchedule exportProcessDagData(ProcessDefinition processDefinition) { List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(processDefinition.getCode()); 
DagDataSchedule dagDataSchedule = new DagDataSchedule(processService.genDagData(processDefinition)); if (!schedules.isEmpty()) { Schedule schedule = schedules.get(0); schedule.setReleaseState(ReleaseState.OFFLINE); dagDataSchedule.setSchedule(schedule); } return dagDataSchedule; } /** * import process definition * * @param loginUser login user * @param projectCode project code * @param file process metadata json file * @return import process */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> importProcessDefinition(User loginUser, long projectCode, MultipartFile file) { Map<String, Object> result = new HashMap<>(); String dagDataScheduleJson = FileUtils.file2String(file); List<DagDataSchedule> dagDataScheduleList = JSONUtils.toList(dagDataScheduleJson, DagDataSchedule.class); //check file content if (CollectionUtils.isEmpty(dagDataScheduleList)) { putMsg(result, Status.DATA_IS_NULL, "fileContent"); return result; } for (DagDataSchedule dagDataSchedule : dagDataScheduleList) { if (!checkAndImport(loginUser, projectCode, result, dagDataSchedule)) { return result; } } return result; } /** * check and import */ private boolean checkAndImport(User loginUser, long projectCode, Map<String, Object> result, DagDataSchedule dagDataSchedule) { if (!checkImportanceParams(dagDataSchedule, result)) { return false; } ProcessDefinition processDefinition = dagDataSchedule.getProcessDefinition(); //unique check Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, projectCode, processDefinition.getName()); if (Status.SUCCESS.equals(checkResult.get(Constants.STATUS))) { putMsg(result, Status.SUCCESS); } else { result.putAll(checkResult); return false; } String processDefinitionName = recursionProcessDefinitionName(projectCode, processDefinition.getName(), 1); processDefinition.setName(processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp()); processDefinition.setUserId(loginUser.getId()); try { processDefinition.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); return false; } List<TaskDefinitionLog> taskDefinitionList = dagDataSchedule.getTaskDefinitionList(); Map<Long, Long> taskCodeMap = new HashMap<>(); Date now = new Date(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionList) { taskDefinitionLog.setName(taskDefinitionLog.getName() + "_import_" + DateUtils.getCurrentTimeStamp()); taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUserId(loginUser.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperator(loginUser.getId()); taskDefinitionLog.setOperateTime(now); try { long code = SnowFlakeUtils.getInstance().nextId(); taskCodeMap.put(taskDefinitionLog.getCode(), code); taskDefinitionLog.setCode(code); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code"); return false; } } int insert = taskDefinitionMapper.batchInsert(taskDefinitionList); int logInsert = taskDefinitionLogMapper.batchInsert(taskDefinitionList); if ((logInsert & insert) == 0) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); return false; } List<ProcessTaskRelationLog> taskRelationList = dagDataSchedule.getProcessTaskRelationList(); taskRelationList.forEach(processTaskRelationLog -> { 
processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode())); processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode())); processTaskRelationLog.setPreTaskVersion(Constants.VERSION_FIRST); processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST); }); Map<String, Object> createProcessResult = createProcessDefine(loginUser, result, taskRelationList, processDefinition, null); if (Status.SUCCESS.equals(createProcessResult.get(Constants.STATUS))) { putMsg(createProcessResult, Status.SUCCESS); } else { result.putAll(createProcessResult); return false; } Schedule schedule = dagDataSchedule.getSchedule(); if (null != schedule) { ProcessDefinition newProcessDefinition = processDefinitionMapper.queryByCode(processDefinition.getCode()); schedule.setProcessDefinitionCode(newProcessDefinition.getCode()); schedule.setUserId(loginUser.getId()); schedule.setCreateTime(now); schedule.setUpdateTime(now); int scheduleInsert = scheduleMapper.insert(schedule); if (0 == scheduleInsert) { putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR); return false; } } return true; } /** * check importance params */ private boolean checkImportanceParams(DagDataSchedule dagDataSchedule, Map<String, Object> result) { if (dagDataSchedule.getProcessDefinition() == null) { putMsg(result, Status.DATA_IS_NULL, "ProcessDefinition"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getTaskDefinitionList())) { putMsg(result, Status.DATA_IS_NULL, "TaskDefinitionList"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getProcessTaskRelationList())) { putMsg(result, Status.DATA_IS_NULL, "ProcessTaskRelationList"); return false; } return true; } private String recursionProcessDefinitionName(long projectCode, String processDefinitionName, int num) { ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName); if (processDefinition != null) { if (num > 1) { String str = processDefinitionName.substring(0, processDefinitionName.length() - 3); processDefinitionName = str + "(" + num + ")"; } else { processDefinitionName = processDefinition.getName() + "(" + num + ")"; } } else { return processDefinitionName; } return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1); } /** * check the process task relation json * * @param processTaskRelationJson process task relation json * @return check result code */ @Override public Map<String, Object> checkProcessNodeList(String processTaskRelationJson) { Map<String, Object> result = new HashMap<>(); try { if (processTaskRelationJson == null) { logger.error("process data is null"); putMsg(result, Status.DATA_IS_NOT_VALID, processTaskRelationJson); return result; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(processTaskRelationJson, ProcessTaskRelationLog.class); // Check whether the task node is normal List<TaskNode> taskNodes = processService.transformTask(taskRelationList, Lists.newArrayList()); if (CollectionUtils.isEmpty(taskNodes)) { logger.error("process node info is empty"); putMsg(result, Status.PROCESS_DAG_IS_EMPTY); return result; } // check has cycle if (graphHasCycle(taskNodes)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the process definition json is normal for (TaskNode taskNode : taskNodes) { if (!CheckUtils.checkTaskNodeParameters(taskNode)) { logger.error("task node {} parameter invalid", 
taskNode.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName()); return result; } // check extra params CheckUtils.checkOtherParams(taskNode.getExtras()); } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * get task node details based on process definition * * @param loginUser loginUser * @param projectCode project code * @param code process definition code * @return task node list */ @Override public Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData.getTaskDefinitionList()); putMsg(result, Status.SUCCESS); return result; } /** * get task node details map based on process definition * * @param loginUser loginUser * @param projectCode project code * @param codes define codes * @return task node list */ @Override public Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode, String codes) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); if (CollectionUtils.isEmpty(processDefinitionList)) { logger.info("process definition not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes); return result; } Map<Long, List<TaskDefinitionLog>> taskNodeMap = new HashMap<>(); for (ProcessDefinition processDefinition : processDefinitionList) { DagData dagData = processService.genDagData(processDefinition); taskNodeMap.put(processDefinition.getCode(), dagData.getTaskDefinitionList()); } result.put(Constants.DATA_LIST, taskNodeMap); putMsg(result, Status.SUCCESS); return result; } /** * query process definition all by project code * * @param loginUser loginUser * @param projectCode project code * @return process definitions in the project */ @Override public Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = processDefinitions.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * 
Encapsulates the TreeView structure * * @param code process definition code * @param limit limit * @return tree view json data */ @Override public Map<String, Object> viewTree(long code, Integer limit) { Map<String, Object> result = new HashMap<>(); ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (null == processDefinition) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition); // nodes that is running Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>(); //nodes that is waiting to run Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>(); // List of process instances List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(code, limit); processInstanceList.forEach(processInstance -> processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()))); List<TaskDefinitionLog> taskDefinitionList = processService.queryTaskDefinitionListByProcess(code, processDefinition.getVersion()); Map<Long, TaskDefinitionLog> taskDefinitionMap = taskDefinitionList.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); if (limit > processInstanceList.size()) { limit = processInstanceList.size(); } TreeViewDto parentTreeViewDto = new TreeViewDto(); parentTreeViewDto.setName("DAG"); parentTreeViewDto.setType(""); // Specify the process definition, because it is a TreeView for a process definition for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); Date endTime = processInstance.getEndTime() == null ? new Date() : processInstance.getEndTime(); parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), "", processInstance.getState().toString(), processInstance.getStartTime(), endTime, processInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime()))); } List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>(); parentTreeViewDtoList.add(parentTreeViewDto); // Here is the encapsulation task instance for (String startNode : dag.getBeginNode()) { runningNodeMap.put(startNode, parentTreeViewDtoList); } while (Stopper.isRunning()) { Set<String> postNodeList; Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator(); while (iter.hasNext()) { Map.Entry<String, List<TreeViewDto>> en = iter.next(); String nodeName = en.getKey(); parentTreeViewDtoList = en.getValue(); TreeViewDto treeViewDto = new TreeViewDto(); treeViewDto.setName(nodeName); TaskNode taskNode = dag.getNode(nodeName); treeViewDto.setType(taskNode.getType()); //set treeViewDto instances for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), nodeName); if (taskInstance == null) { treeViewDto.getInstances().add(new Instance(-1, "not running", "null")); } else { Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime(); Date endTime = taskInstance.getEndTime() == null ? 
new Date() : taskInstance.getEndTime(); int subProcessId = 0; // if process is sub process, the return sub id, or sub id=0 if (taskInstance.isSubProcess()) { TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode()); subProcessId = Integer.parseInt(JSONUtils.parseObject( taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_ID).asText()); } treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskType(), taskInstance.getState().toString(), taskInstance.getStartTime(), taskInstance.getEndTime(), taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId)); } } for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) { pTreeViewDto.getChildren().add(treeViewDto); } postNodeList = dag.getSubsequentNodes(nodeName); if (CollectionUtils.isNotEmpty(postNodeList)) { for (String nextNodeName : postNodeList) { List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName); if (CollectionUtils.isEmpty(treeViewDtoList)) { treeViewDtoList = new ArrayList<>(); } treeViewDtoList.add(treeViewDto); waitingRunningNodeMap.put(nextNodeName, treeViewDtoList); } } runningNodeMap.remove(nodeName); } if (waitingRunningNodeMap.size() == 0) { break; } else { runningNodeMap.putAll(waitingRunningNodeMap); waitingRunningNodeMap.clear(); } } result.put(Constants.DATA_LIST, parentTreeViewDto); result.put(Constants.STATUS, Status.SUCCESS); result.put(Constants.MSG, Status.SUCCESS.getMsg()); return result; } /** * whether the graph has a ring * * @param taskNodeResponseList task node response list * @return if graph has cycle flag */ private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) { DAG<String, TaskNode, String> graph = new DAG<>(); // Fill the vertices for (TaskNode taskNodeResponse : taskNodeResponseList) { graph.addNode(taskNodeResponse.getName(), taskNodeResponse); } // Fill edge relations for (TaskNode taskNodeResponse : taskNodeResponseList) { List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class); if (CollectionUtils.isNotEmpty(preTasks)) { for (String preTask : preTasks) { if (!graph.addEdge(preTask, taskNodeResponse.getName())) { return true; } } } } return graph.hasCycle(); } /** * batch copy process definition * * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override public Map<String, Object> batchCopyProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, true); checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, true); return result; } /** * batch move process definition * * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override public Map<String, Object> batchMoveProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (projectCode == 
targetProjectCode) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, false); checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, false); return result; } private Map<String, Object> checkParams(User loginUser, long projectCode, String processDefinitionCodes, long targetProjectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (StringUtils.isEmpty(processDefinitionCodes)) { putMsg(result, Status.PROCESS_DEFINITION_CODES_IS_EMPTY, processDefinitionCodes); return result; } if (projectCode != targetProjectCode) { Project targetProject = projectMapper.queryByCode(targetProjectCode); //check user access for project Map<String, Object> targetResult = projectService.checkProjectAndAuth(loginUser, targetProject, targetProjectCode); if (targetResult.get(Constants.STATUS) != Status.SUCCESS) { return targetResult; } } return result; } private void doBatchOperateProcessDefinition(User loginUser, long targetProjectCode, List<String> failedProcessList, String processDefinitionCodes, Map<String, Object> result, boolean isCopy) { Set<Long> definitionCodes = Arrays.stream(processDefinitionCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(definitionCodes); Set<Long> queryCodes = processDefinitionList.stream().map(ProcessDefinition::getCode).collect(Collectors.toSet()); // definitionCodes - queryCodes Set<Long> diffCode = definitionCodes.stream().filter(code -> !queryCodes.contains(code)).collect(Collectors.toSet()); diffCode.forEach(code -> failedProcessList.add(code + "[null]")); for (ProcessDefinition processDefinition : processDefinitionList) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<ProcessTaskRelationLog> taskRelationList = processTaskRelations.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList()); processDefinition.setProjectCode(targetProjectCode); if (isCopy) { processDefinition.setName(processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp()); createProcessDefine(loginUser, result, taskRelationList, processDefinition, Lists.newArrayList()); } else { updateProcessDefine(loginUser, result, taskRelationList, processDefinition, null, Lists.newArrayList()); } if (result.get(Constants.STATUS) != Status.SUCCESS) { failedProcessList.add(processDefinition.getCode() + "[" + processDefinition.getName() + "]"); } } } /** * switch the defined process definition version * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param version the version user want to switch * @return switch process definition version result code */ @Override public Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition 
processDefinition = processDefinitionMapper.queryByCode(code); if (Objects.isNull(processDefinition)) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR, code); return result; } ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper .queryByDefinitionCodeAndVersion(code, version); if (Objects.isNull(processDefinitionLog)) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR, processDefinition.getCode(), version); return result; } int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog); if (switchVersion > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR); } return result; } /** * check batch operate result * * @param srcProjectCode srcProjectCode * @param targetProjectCode targetProjectCode * @param result result * @param failedProcessList failedProcessList * @param isCopy isCopy */ private void checkBatchOperateResult(long srcProjectCode, long targetProjectCode, Map<String, Object> result, List<String> failedProcessList, boolean isCopy) { if (!failedProcessList.isEmpty()) { if (isCopy) { putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } else { putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } } else { putMsg(result, Status.SUCCESS); } } /** * query the pagination versions info by one certain process definition code * * @param loginUser login user info to check auth * @param projectCode project code * @param pageNo page number * @param pageSize page size * @param code process definition code * @return the pagination process definition versions info of the certain process definition */ @Override public Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); // check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize); Page<ProcessDefinitionLog> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, code); List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords(); pageInfo.setTotalList(processDefinitionLogs); pageInfo.setTotal((int) processDefinitionVersionsPaging.getTotal()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * delete one certain process definition by version number and process definition code * * @param loginUser login user info to check auth * @param projectCode project code * @param code process definition code * @param version version number * @return delele result code */ @Override public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if 
(result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version); putMsg(result, Status.SUCCESS); } return result; } }
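A note on the import path recorded above: recursionProcessDefinitionName deduplicates imported names by appending a numeric suffix until the lookup misses. The sketch below re-implements just that renaming strategy so it can run standalone; the HashSet stands in for processDefinitionMapper.queryByDefineName, which is an assumption made for the example.

```java
import java.util.HashSet;
import java.util.Set;

public class NameDeduplicator {

    // Stand-in for the mapper lookup; in the service this is a database query.
    private final Set<String> existingNames;

    public NameDeduplicator(Set<String> names) {
        this.existingNames = new HashSet<>(names);
    }

    // Mirrors recursionProcessDefinitionName: try name, then name(1), name(2), ...
    public String dedup(String name, int num) {
        if (!existingNames.contains(name)) {
            return name;
        }
        String candidate = (num > 1)
                // strip the previous "(n)" suffix (three characters) before appending
                ? name.substring(0, name.length() - 3) + "(" + num + ")"
                : name + "(" + num + ")";
        return dedup(candidate, num + 1);
    }

    public static void main(String[] args) {
        Set<String> taken = new HashSet<>();
        taken.add("etl");
        taken.add("etl(1)");
        System.out.println(new NameDeduplicator(taken).dedup("etl", 1)); // etl(2)
    }
}
```

Worth noting when reading the original: the fixed three-character strip only matches single-digit suffixes such as (9), so a name that has already reached (10) would be clipped incorrectly on the next round.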
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,146
[Bug] Save workflow error
I created a workflow as follows: ![image](https://user-images.githubusercontent.com/87303815/132616260-4e0be953-ecfe-497a-86ed-a5c0fe8ba404.png) I got an error when I clicked the save button: ![image](https://user-images.githubusercontent.com/87303815/132616620-89a34a31-f6e4-4bab-9c5f-168c10b92907.png) **Which version of Dolphin Scheduler:** version: 2.0.0 branch: dev **Additional context** The request params: [request.txt](https://github.com/apache/dolphinscheduler/files/7133138/request.txt)
https://github.com/apache/dolphinscheduler/issues/6146
https://github.com/apache/dolphinscheduler/pull/6150
cca48d0a9249917f7dc2ecef1cad67e93e227555
e8ddc9103dbc6d0e69b5a1e10c43606ba84f7009
"2021-09-09T03:19:57Z"
java
"2021-09-09T14:01:22Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/SnowFlakeUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common.utils; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Objects; public class SnowFlakeUtils { // start timestamp private static final long START_TIMESTAMP = 1609430400L; //2021-01-01 // Number of digits private static final long SEQUENCE_BIT = 13; private static final long MACHINE_BIT = 2; private static final long MAX_SEQUENCE = ~(-1L << SEQUENCE_BIT); // The displacement to the left private static final long MACHINE_LEFT = SEQUENCE_BIT; private static final long TIMESTAMP_LEFT = SEQUENCE_BIT + MACHINE_BIT; private final int machineId; private long sequence = 0L; private long lastTimestamp = -1L; private SnowFlakeUtils() throws SnowFlakeException { try { this.machineId = Math.abs(Objects.hash(InetAddress.getLocalHost().getHostName())) % 32; } catch (UnknownHostException e) { throw new SnowFlakeException(e.getMessage()); } } private static SnowFlakeUtils instance = null; public static synchronized SnowFlakeUtils getInstance() throws SnowFlakeException { if (instance == null) { instance = new SnowFlakeUtils(); } return instance; } public synchronized long nextId() throws SnowFlakeException { long currStmp = nowTimestamp(); if (currStmp < lastTimestamp) { throw new SnowFlakeException("Clock moved backwards. Refusing to generate id"); } if (currStmp == lastTimestamp) { sequence = (sequence + 1) & MAX_SEQUENCE; if (sequence == 0L) { currStmp = getNextMill(); } } else { sequence = 0L; } lastTimestamp = currStmp; return (currStmp - START_TIMESTAMP) << TIMESTAMP_LEFT | machineId << MACHINE_LEFT | sequence; } private long getNextMill() { long mill = nowTimestamp(); while (mill <= lastTimestamp) { mill = nowTimestamp(); } return mill; } private long nowTimestamp() { return System.currentTimeMillis() / 1000; } public static class SnowFlakeException extends Exception { public SnowFlakeException(String message) { super(message); } } }
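One detail worth flagging in the generator above: SEQUENCE_BIT = 13 and MACHINE_BIT = 2 reserve only two bits for the machine field (values 0-3), yet machineId is derived modulo 32 and can reach 31, so the high machine bits shift into the timestamp field. The check below is my own illustration of that overlap, not part of the linked fix, and the duplicate-id reading of issue 6146 is an inference rather than a confirmed root cause.

```java
public class BitLayoutCheck {

    private static final long SEQUENCE_BIT = 13;
    private static final long MACHINE_BIT = 2;
    private static final long MACHINE_LEFT = SEQUENCE_BIT;
    private static final long TIMESTAMP_LEFT = SEQUENCE_BIT + MACHINE_BIT;

    public static void main(String[] args) {
        // Largest machine id that actually fits in MACHINE_BIT bits: 3.
        long maxMachine = ~(-1L << MACHINE_BIT);
        System.out.println("max machine id that fits: " + maxMachine);

        // machineId is computed modulo 32 in SnowFlakeUtils, so 31 can occur.
        long machineId = 31;
        long shifted = machineId << MACHINE_LEFT;

        // Any bit at or above TIMESTAMP_LEFT belongs to the timestamp field.
        boolean spills = (shifted >> TIMESTAMP_LEFT) != 0;
        System.out.println("machine bits spill into timestamp: " + spills); // true for id > 3
    }
}
```

When machine bits alias timestamp bits, two hosts (or one host at two nearby seconds) can emit the same id, which would be one plausible way distinct definitions end up with colliding codes.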
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,166
[Bug] [task-plugin] Python Task execute error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the newest dev version, the python task migration is incomplete. When I execute a python task in a workflow, ds throws a NullPointerException. ### What you expected to happen The python task executes correctly. ### How to reproduce - Add a python task to a workflow. - Type a simple script, such as `print('Hello')` - Then execute the workflow ### Anything else _No response_ ### Are you willing to submit PR? - [x] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6166
https://github.com/apache/dolphinscheduler/pull/6167
ea2a9dbcb2ca3f4be2da6a4f300e988640e72219
0715be34d5b4c553396374679ccb23fedc4c27a0
"2021-09-10T15:02:37Z"
java
"2021-09-11T04:25:24Z"
dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/task/AbstractTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.spi.task; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; /** * executive task */ public abstract class AbstractTask { /** * varPool string */ protected String varPool; /** * taskExecutionContext **/ TaskRequest taskRequest; /** * SHELL process pid */ protected int processId; /** * SHELL result string */ protected String resultString; /** * other resource manager appId , for example : YARN etc */ protected String appIds; /** * cancel */ protected volatile boolean cancel = false; /** * exit code */ protected volatile int exitStatusCode = -1; /** * constructor * * @param taskExecutionContext taskExecutionContext */ protected AbstractTask(TaskRequest taskExecutionContext) { this.taskRequest = taskExecutionContext; } /** * init task */ public void init() { } public String getPreScript() { return null; } public void setCommand(String command) throws Exception { } /** * task handle * * @throws Exception exception */ public abstract void handle() throws Exception; /** * cancel application * * @param status status * @throws Exception exception */ public void cancelApplication(boolean status) throws Exception { this.cancel = status; } public void setVarPool(String varPool) { this.varPool = varPool; } public String getVarPool() { return varPool; } /** * get exit status code * * @return exit status code */ public int getExitStatusCode() { return exitStatusCode; } public void setExitStatusCode(int exitStatusCode) { this.exitStatusCode = exitStatusCode; } public String getAppIds() { return appIds; } public void setAppIds(String appIds) { this.appIds = appIds; } public int getProcessId() { return processId; } public void setProcessId(int processId) { this.processId = processId; } public String getResultString() { return resultString; } public void setResultString(String resultString) { this.resultString = resultString; } /** * get task parameters * * @return AbstractParameters */ public abstract AbstractParameters getParameters(); /** * result processing maybe */ public void after() { } /** * get exit status according to exitCode * * @return exit status */ public ExecutionStatus getExitStatus() { ExecutionStatus status; switch (getExitStatusCode()) { case TaskConstants.EXIT_CODE_SUCCESS: status = ExecutionStatus.SUCCESS; break; case TaskConstants.EXIT_CODE_KILL: status = ExecutionStatus.KILL; break; default: status = ExecutionStatus.FAILURE; break; } return status; } }
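To make the contract of AbstractTask concrete, here is a minimal, hypothetical subclass; EchoTask is invented for illustration and is not a real DolphinScheduler plugin. A real implementation would parse taskRequest.getTaskParams() into its own AbstractParameters subtype and launch a process in handle().

```java
import org.apache.dolphinscheduler.spi.task.AbstractParameters;
import org.apache.dolphinscheduler.spi.task.AbstractTask;
import org.apache.dolphinscheduler.spi.task.TaskConstants;
import org.apache.dolphinscheduler.spi.task.request.TaskRequest;

// Hypothetical plugin sketch, assuming only the API shown in the class above.
public class EchoTask extends AbstractTask {

    public EchoTask(TaskRequest taskExecutionContext) {
        super(taskExecutionContext);
    }

    @Override
    public void handle() {
        // A real task would start and monitor a process here; we simulate success.
        setResultString("echo done");
        setExitStatusCode(TaskConstants.EXIT_CODE_SUCCESS);
    }

    @Override
    public AbstractParameters getParameters() {
        // A real plugin returns its parsed parameter object.
        return null;
    }
}
```

getExitStatus() then maps the stored exit code to ExecutionStatus.SUCCESS, KILL, or FAILURE, so a subclass only has to set the exit code correctly.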
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,166
[Bug] [task-plugin] Python Task execute error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the newest dev version, the python task migration is incomplete. When I execute a python task in a workflow, ds throws a NullPointerException. ### What you expected to happen The python task executes correctly. ### How to reproduce - Add a python task to a workflow. - Type a simple script, such as `print('Hello')` - Then execute the workflow ### Anything else _No response_ ### Are you willing to submit PR? - [x] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6166
https://github.com/apache/dolphinscheduler/pull/6167
ea2a9dbcb2ca3f4be2da6a4f300e988640e72219
0715be34d5b4c553396374679ccb23fedc4c27a0
"2021-09-10T15:02:37Z"
java
"2021-09-11T04:25:24Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/AbstractCommandExecutor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.api; import static org.apache.dolphinscheduler.spi.task.TaskConstants.EXIT_CODE_FAILURE; import static org.apache.dolphinscheduler.spi.task.TaskConstants.EXIT_CODE_KILL; import static org.apache.dolphinscheduler.spi.task.TaskConstants.SH; import org.apache.dolphinscheduler.plugin.task.util.LoggerUtils; import org.apache.dolphinscheduler.plugin.task.util.OSUtils; import org.apache.dolphinscheduler.spi.task.TaskConstants; import org.apache.dolphinscheduler.spi.task.TaskExecutionContextCacheManager; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; import org.apache.dolphinscheduler.spi.utils.StringUtils; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.lang.reflect.Field; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.slf4j.Logger; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * abstract command executor */ public abstract class AbstractCommandExecutor { /** * rules for extracting application ID */ protected static final Pattern APPLICATION_REGEX = Pattern.compile(TaskConstants.APPLICATION_REGEX); protected StringBuilder varPool = new StringBuilder(); /** * process */ private Process process; /** * log handler */ protected Consumer<List<String>> logHandler; /** * logger */ protected Logger logger; /** * log list */ protected List<String> logBuffer; protected boolean logOutputIsSuccess = false; /* * SHELL result string */ protected String taskResultString; /** * taskRequest */ protected TaskRequest taskRequest; public AbstractCommandExecutor(Consumer<List<String>> logHandler, TaskRequest taskRequest, Logger logger) { this.logHandler = logHandler; this.taskRequest = taskRequest; this.logger = logger; this.logBuffer = Collections.synchronizedList(new ArrayList<>()); } public AbstractCommandExecutor(List<String> logBuffer) { this.logBuffer = logBuffer; } /** * build process * * @param commandFile command file * @throws IOException IO Exception */ private void buildProcess(String commandFile) throws IOException { // setting up user to run commands List<String> command = new LinkedList<>(); //init process builder ProcessBuilder processBuilder = new ProcessBuilder(); // setting up a working directory processBuilder.directory(new File(taskRequest.getExecutePath())); // 
merge error information to standard output stream processBuilder.redirectErrorStream(true); // setting up user to run commands command.add("sudo"); command.add("-u"); command.add(taskRequest.getTenantCode()); command.add(SH); command.addAll(Collections.emptyList()); command.add(commandFile); // setting commands processBuilder.command(command); process = processBuilder.start(); printCommand(command); } public TaskResponse run(String execCommand) throws IOException, InterruptedException { TaskResponse result = new TaskResponse(); int taskInstanceId = taskRequest.getTaskInstanceId(); if (null == TaskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId)) { result.setExitStatusCode(EXIT_CODE_KILL); return result; } if (StringUtils.isEmpty(execCommand)) { TaskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId); return result; } String commandFilePath = buildCommandFilePath(); // create command file if not exists createCommandFileIfNotExists(execCommand, commandFilePath); //build process buildProcess(commandFilePath); // parse process output parseProcessOutput(process); int processId = getProcessId(process); result.setProcessId(processId); // cache processId taskRequest.setProcessId(processId); boolean updateTaskExecutionContextStatus = TaskExecutionContextCacheManager.updateTaskExecutionContext(taskRequest); if (Boolean.FALSE.equals(updateTaskExecutionContextStatus)) { ProcessUtils.kill(taskRequest); result.setExitStatusCode(EXIT_CODE_KILL); return result; } // print process id logger.info("process start, process id is: {}", processId); // if timeout occurs, exit directly long remainTime = getRemainTime(); // waiting for the run to finish boolean status = process.waitFor(remainTime, TimeUnit.SECONDS); // if SHELL task exit if (status) { // set appIds List<String> appIds = getAppIds(taskRequest.getLogPath()); result.setAppIds(String.join(TaskConstants.COMMA, appIds)); // SHELL task state result.setExitStatusCode(process.exitValue()); } else { logger.error("process has failure , exitStatusCode:{}, processExitValue:{}, ready to kill ...", result.getExitStatusCode(), process.exitValue()); ProcessUtils.kill(taskRequest); result.setExitStatusCode(EXIT_CODE_FAILURE); } logger.info("process has exited, execute path:{}, processId:{} ,exitStatusCode:{} ,processWaitForStatus:{} ,processExitValue:{}", taskRequest.getExecutePath(), processId, result.getExitStatusCode(), status, process.exitValue()); return result; } public String getVarPool() { return varPool.toString(); } /** * cancel application * * @throws Exception exception */ public void cancelApplication() throws Exception { if (process == null) { return; } // clear log clear(); int processId = getProcessId(process); logger.info("cancel process: {}", processId); // kill , waiting for completion boolean killed = softKill(processId); if (!killed) { // hard kill hardKill(processId); // destory process.destroy(); process = null; } } /** * soft kill * * @param processId process id * @return process is alive */ private boolean softKill(int processId) { if (processId != 0 && process.isAlive()) { try { // sudo -u user command to run command String cmd = String.format("kill %d", processId); cmd = OSUtils.getSudoCmd(taskRequest.getTenantCode(), cmd); logger.info("soft kill task:{}, process id:{}, cmd:{}", taskRequest.getTaskAppId(), processId, cmd); Runtime.getRuntime().exec(cmd); } catch (IOException e) { logger.info("kill attempt failed", e); } } return process.isAlive(); } /** * hard kill * * @param processId process id */ private 
void hardKill(int processId) { if (processId != 0 && process.isAlive()) { try { String cmd = String.format("kill -9 %d", processId); cmd = OSUtils.getSudoCmd(taskRequest.getTenantCode(), cmd); logger.info("hard kill task:{}, process id:{}, cmd:{}", taskRequest.getTaskAppId(), processId, cmd); Runtime.getRuntime().exec(cmd); } catch (IOException e) { logger.error("kill attempt failed ", e); } } } private void printCommand(List<String> commands) { logger.info("task run command: {}", String.join(" ", commands)); } /** * clear */ private void clear() { List<String> markerList = new ArrayList<>(); markerList.add(ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER.toString()); if (!logBuffer.isEmpty()) { // log handle logHandler.accept(logBuffer); logBuffer.clear(); } logHandler.accept(markerList); } /** * get the standard output of the process * * @param process process */ private void parseProcessOutput(Process process) { String threadLoggerInfoName = String.format(LoggerUtils.TASK_LOGGER_THREAD_NAME + "-%s", taskRequest.getTaskAppId()); ExecutorService getOutputLogService = newDaemonSingleThreadExecutor(threadLoggerInfoName + "-" + "getOutputLogService"); getOutputLogService.submit(() -> { try (BufferedReader inReader = new BufferedReader(new InputStreamReader(process.getInputStream()))) { String line; logBuffer.add("welcome to use bigdata scheduling system..."); while ((line = inReader.readLine()) != null) { if (line.startsWith("${setValue(")) { varPool.append(line, "${setValue(".length(), line.length() - 2); varPool.append("$VarPool$"); } else { logBuffer.add(line); taskResultString = line; } } logOutputIsSuccess = true; } catch (Exception e) { logger.error(e.getMessage(), e); logOutputIsSuccess = true; } }); getOutputLogService.shutdown(); ExecutorService parseProcessOutputExecutorService = newDaemonSingleThreadExecutor(threadLoggerInfoName); parseProcessOutputExecutorService.submit(() -> { try { long lastFlushTime = System.currentTimeMillis(); while (logBuffer.size() > 0 || !logOutputIsSuccess) { if (logBuffer.size() > 0) { lastFlushTime = flush(lastFlushTime); } else { Thread.sleep(TaskConstants.DEFAULT_LOG_FLUSH_INTERVAL); } } } catch (Exception e) { Thread.currentThread().interrupt(); logger.error(e.getMessage(), e); } finally { clear(); } }); parseProcessOutputExecutorService.shutdown(); } /** * get app links * * @param logPath log path * @return app id list */ private List<String> getAppIds(String logPath) { List<String> logs = convertFile2List(logPath); List<String> appIds = new ArrayList<>(); /* * analysis log?get submited yarn application id */ for (String log : logs) { String appId = findAppId(log); if (StringUtils.isNotEmpty(appId) && !appIds.contains(appId)) { logger.info("find app id: {}", appId); appIds.add(appId); } } return appIds; } /** * convert file to list * * @param filename file name * @return line list */ private List<String> convertFile2List(String filename) { List<String> lineList = new ArrayList<>(100); File file = new File(filename); if (!file.exists()) { return lineList; } try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8))) { String line; while ((line = br.readLine()) != null) { lineList.add(line); } } catch (Exception e) { logger.error(String.format("read file: %s failed : ", filename), e); } return lineList; } /** * find app id * * @param line line * @return appid */ private String findAppId(String line) { Matcher matcher = APPLICATION_REGEX.matcher(line); if (matcher.find()) { 
return matcher.group(); } return null; } /** * get remain time(s) * * @return remain time */ private long getRemainTime() { long usedTime = (System.currentTimeMillis() - taskRequest.getStartTime().getTime()) / 1000; long remainTime = taskRequest.getTaskTimeout() - usedTime; if (remainTime < 0) { throw new RuntimeException("task execution time out"); } return remainTime; } /** * get process id * * @param process process * @return process id */ private int getProcessId(Process process) { int processId = 0; try { Field f = process.getClass().getDeclaredField(TaskConstants.PID); f.setAccessible(true); processId = f.getInt(process); } catch (Throwable e) { logger.error(e.getMessage(), e); } return processId; } /** * when log buffer siz or flush time reach condition , then flush * * @param lastFlushTime last flush time * @return last flush time */ private long flush(long lastFlushTime) { long now = System.currentTimeMillis(); /* * when log buffer siz or flush time reach condition , then flush */ if (logBuffer.size() >= TaskConstants.DEFAULT_LOG_ROWS_NUM || now - lastFlushTime > TaskConstants.DEFAULT_LOG_FLUSH_INTERVAL) { lastFlushTime = now; logHandler.accept(logBuffer); logBuffer.clear(); } return lastFlushTime; } protected abstract String buildCommandFilePath(); protected abstract void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException; ExecutorService newDaemonSingleThreadExecutor(String threadName) { ThreadFactory threadFactory = new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat(threadName) .build(); return Executors.newSingleThreadExecutor(threadFactory); } }
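The stdout scanner in parseProcessOutput above treats any line beginning with ${setValue( as a variable-pool entry: it clips the wrapper and joins entries with the $VarPool$ separator. The following standalone snippet re-runs just that clipping logic on made-up sample lines:

```java
public class VarPoolParseDemo {

    public static void main(String[] args) {
        String[] stdout = {
            "plain log line",
            "${setValue(k1=v1)}", // captured into the var pool
            "${setValue(k2=v2)}"
        };
        String prefix = "${setValue(";
        StringBuilder varPool = new StringBuilder();
        for (String line : stdout) {
            if (line.startsWith(prefix)) {
                // drop "${setValue(" at the front and ")}" at the back
                varPool.append(line, prefix.length(), line.length() - 2);
                varPool.append("$VarPool$");
            }
        }
        System.out.println(varPool); // k1=v1$VarPool$k2=v2$VarPool$
    }
}
```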
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,166
[Bug] [task-plugin] Python Task execute error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the newest dev version, the python task migration is incomplete. When I execute a python task in a workflow, ds throws a NullPointerException. ### What you expected to happen The python task executes correctly. ### How to reproduce - Add a python task to a workflow. - Type a simple script, such as `print('Hello')` - Then execute the workflow ### Anything else _No response_ ### Are you willing to submit PR? - [x] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6166
https://github.com/apache/dolphinscheduler/pull/6167
ea2a9dbcb2ca3f4be2da6a4f300e988640e72219
0715be34d5b4c553396374679ccb23fedc4c27a0
"2021-09-10T15:02:37Z"
java
"2021-09-11T04:25:24Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-python/src/main/java/org/apache/dolphinscheduler/plugin/task/python/PythonTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.python; import org.apache.dolphinscheduler.plugin.task.api.AbstractTaskExecutor; import org.apache.dolphinscheduler.plugin.task.api.TaskException; import org.apache.dolphinscheduler.plugin.task.api.TaskResponse; import org.apache.dolphinscheduler.spi.task.AbstractParameters; import org.apache.dolphinscheduler.spi.task.TaskConstants; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; import org.apache.dolphinscheduler.spi.utils.JSONUtils; /** * python task */ public class PythonTask extends AbstractTaskExecutor { /** * python parameters */ private PythonParameters pythonParameters; /** * task dir */ private String taskDir; /** * python command executor */ private PythonCommandExecutor pythonCommandExecutor; private TaskRequest taskRequest; private String command; /** * constructor * * @param taskRequest taskRequest */ public PythonTask(TaskRequest taskRequest) { super(taskRequest); this.taskRequest = taskRequest; this.pythonCommandExecutor = new PythonCommandExecutor(this::logHandle, taskRequest, logger); } @Override public void init() { logger.info("python task params {}", taskRequest.getTaskParams()); pythonParameters = JSONUtils.parseObject(taskRequest.getTaskParams(), PythonParameters.class); if (!pythonParameters.checkParameters()) { throw new TaskException("python task params is not valid"); } } @Override public String getPreScript() { String rawPythonScript = pythonParameters.getRawScript().replaceAll("\\r\\n", "\n"); try { rawPythonScript = convertPythonScriptPlaceholders(rawPythonScript); } catch (StringIndexOutOfBoundsException e) { logger.error("setShareVar field format error, raw python script : {}", rawPythonScript); } return rawPythonScript; } @Override public void setCommand(String command) { this.command = command; } @Override public void handle() throws Exception { try { // construct process TaskResponse taskResponse = pythonCommandExecutor.run(command); setExitStatusCode(taskResponse.getExitStatusCode()); setAppIds(taskResponse.getAppIds()); setProcessId(taskResponse.getProcessId()); setVarPool(pythonCommandExecutor.getVarPool()); } catch (Exception e) { logger.error("python task failure", e); setExitStatusCode(TaskConstants.EXIT_CODE_FAILURE); throw new TaskException("run python task error",e); } } @Override public void cancelApplication(boolean cancelApplication) throws Exception { // cancel process pythonCommandExecutor.cancelApplication(); } @Override public AbstractParameters getParameters() { return pythonParameters; } /** * convertPythonScriptPlaceholders * * @param rawScript rawScript * @return String * @throws StringIndexOutOfBoundsException StringIndexOutOfBoundsException */ private static String convertPythonScriptPlaceholders(String 
rawScript) throws StringIndexOutOfBoundsException { int len = "${setShareVar(${".length(); int scriptStart = 0; while ((scriptStart = rawScript.indexOf("${setShareVar(${", scriptStart)) != -1) { int start = -1; int end = rawScript.indexOf('}', scriptStart + len); String prop = rawScript.substring(scriptStart + len, end); start = rawScript.indexOf(',', end); end = rawScript.indexOf(')', start); String value = rawScript.substring(start + 1, end); start = rawScript.indexOf('}', start) + 1; end = rawScript.length(); String replaceScript = String.format("print(\"${{setValue({},{})}}\".format(\"%s\",%s))", prop, value); rawScript = rawScript.substring(0, scriptStart) + replaceScript + rawScript.substring(start, end); scriptStart += replaceScript.length(); } return rawScript; } }
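convertPythonScriptPlaceholders above rewrites each ${setShareVar(${prop},value)} placeholder into a Python print that, when the script runs, emits exactly the ${setValue(...)} line the command executor's stdout scanner looks for. The demo below copies that algorithm and applies it to a made-up script:

```java
public class PlaceholderDemo {

    public static void main(String[] args) {
        String raw = "x = 1\n${setShareVar(${myVar},x)}\nprint(x)";
        System.out.println(convert(raw));
        // x = 1
        // print("${{setValue({},{})}}".format("myVar",x))
        // print(x)
    }

    // Same algorithm as convertPythonScriptPlaceholders in the class above.
    static String convert(String rawScript) {
        int len = "${setShareVar(${".length();
        int scriptStart = 0;
        while ((scriptStart = rawScript.indexOf("${setShareVar(${", scriptStart)) != -1) {
            int end = rawScript.indexOf('}', scriptStart + len);
            String prop = rawScript.substring(scriptStart + len, end);
            int start = rawScript.indexOf(',', end);
            end = rawScript.indexOf(')', start);
            String value = rawScript.substring(start + 1, end);
            start = rawScript.indexOf('}', start) + 1;
            String replaceScript = String.format(
                    "print(\"${{setValue({},{})}}\".format(\"%s\",%s))", prop, value);
            rawScript = rawScript.substring(0, scriptStart) + replaceScript + rawScript.substring(start);
            scriptStart += replaceScript.length();
        }
        return rawScript;
    }
}
```

When Python executes the rewritten line, .format fills the doubled braces, so the process prints ${setValue(myVar,1)} and AbstractCommandExecutor folds it into the var pool, tying this plugin back to the executor shown earlier.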
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,159
[Improve] [SQL] Set the same default value for the field of environment code in Mysql and Postgres
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The environment code field is set to different default values in dolphinscheduler_postgre.sql and dolphinscheduler_mysql.sql. In dolphinscheduler_mysql.sql, the default value of the environment code is set to '-1': ![image](https://user-images.githubusercontent.com/4928204/132790181-b45d5e81-df5f-49fc-88fd-cfbff8be8115.png) But in dolphinscheduler_postgre.sql, the default value of the environment code is set to null: ![image](https://user-images.githubusercontent.com/4928204/132790341-16df3422-d696-4ba8-b024-65ee1d5274d9.png) ### What you expected to happen We'd better use the same default value for the environment code across the different database types. ### How to reproduce Check out these two files and you will see the difference. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
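One hedged illustration of why the mismatch matters, assuming some caller compares the boxed column value against the -1 sentinel (the variable names here are invented; this is not code from the project):

```java
public class DefaultMismatchDemo {

    public static void main(String[] args) {
        Long envFromMysql = -1L; // column default '-1' in dolphinscheduler_mysql.sql
        Long envFromPg = null;   // column default NULL in dolphinscheduler_postgre.sql

        System.out.println(envFromMysql == -1); // true: unboxes cleanly
        try {
            System.out.println(envFromPg == -1); // unboxing null throws
        } catch (NullPointerException e) {
            System.out.println("NPE: sentinel checks written for the MySQL default break on Postgres");
        }
    }
}
```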
https://github.com/apache/dolphinscheduler/issues/6159
https://github.com/apache/dolphinscheduler/pull/6160
58b694a85cf91a193a8768b370739d6cf23022a7
6dedafb8ff2dd413cd82d8d2f981f5774221c533
"2021-09-10T02:42:36Z"
java
"2021-09-11T11:49:32Z"
sql/dolphinscheduler_h2.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET FOREIGN_KEY_CHECKS=0; -- ---------------------------- -- Table structure for QRTZ_JOB_DETAILS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_JOB_DETAILS; CREATE TABLE QRTZ_JOB_DETAILS ( SCHED_NAME varchar(120) NOT NULL, JOB_NAME varchar(200) NOT NULL, JOB_GROUP varchar(200) NOT NULL, DESCRIPTION varchar(250) DEFAULT NULL, JOB_CLASS_NAME varchar(250) NOT NULL, IS_DURABLE varchar(1) NOT NULL, IS_NONCONCURRENT varchar(1) NOT NULL, IS_UPDATE_DATA varchar(1) NOT NULL, REQUESTS_RECOVERY varchar(1) NOT NULL, JOB_DATA blob, PRIMARY KEY (SCHED_NAME, JOB_NAME, JOB_GROUP) ); -- ---------------------------- -- Table structure for QRTZ_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_TRIGGERS; CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, JOB_NAME varchar(200) NOT NULL, JOB_GROUP varchar(200) NOT NULL, DESCRIPTION varchar(250) DEFAULT NULL, NEXT_FIRE_TIME bigint(13) DEFAULT NULL, PREV_FIRE_TIME bigint(13) DEFAULT NULL, PRIORITY int(11) DEFAULT NULL, TRIGGER_STATE varchar(16) NOT NULL, TRIGGER_TYPE varchar(8) NOT NULL, START_TIME bigint(13) NOT NULL, END_TIME bigint(13) DEFAULT NULL, CALENDAR_NAME varchar(200) DEFAULT NULL, MISFIRE_INSTR smallint(2) DEFAULT NULL, JOB_DATA blob, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), CONSTRAINT QRTZ_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, JOB_NAME, JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS (SCHED_NAME, JOB_NAME, JOB_GROUP) ); -- ---------------------------- -- Table structure for QRTZ_BLOB_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS; CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, BLOB_DATA blob, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_BLOB_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CALENDARS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_CALENDARS; CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME varchar(120) NOT NULL, CALENDAR_NAME varchar(200) NOT NULL, CALENDAR blob NOT NULL, PRIMARY KEY (SCHED_NAME, CALENDAR_NAME) ); -- ---------------------------- -- Records of QRTZ_CALENDARS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CRON_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS; CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, 
CRON_EXPRESSION varchar(120) NOT NULL, TIME_ZONE_ID varchar(80) DEFAULT NULL, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), CONSTRAINT QRTZ_CRON_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_CRON_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_FIRED_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS; CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, ENTRY_ID varchar(200) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, INSTANCE_NAME varchar(200) NOT NULL, FIRED_TIME bigint(13) NOT NULL, SCHED_TIME bigint(13) NOT NULL, PRIORITY int(11) NOT NULL, STATE varchar(16) NOT NULL, JOB_NAME varchar(200) DEFAULT NULL, JOB_GROUP varchar(200) DEFAULT NULL, IS_NONCONCURRENT varchar(1) DEFAULT NULL, REQUESTS_RECOVERY varchar(1) DEFAULT NULL, PRIMARY KEY (SCHED_NAME, ENTRY_ID) ); -- ---------------------------- -- Records of QRTZ_FIRED_TRIGGERS -- ---------------------------- -- ---------------------------- -- Records of QRTZ_JOB_DETAILS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_LOCKS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_LOCKS; CREATE TABLE QRTZ_LOCKS ( SCHED_NAME varchar(120) NOT NULL, LOCK_NAME varchar(40) NOT NULL, PRIMARY KEY (SCHED_NAME, LOCK_NAME) ); -- ---------------------------- -- Records of QRTZ_LOCKS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS; CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, PRIMARY KEY (SCHED_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SCHEDULER_STATE -- ---------------------------- DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE; CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME varchar(120) NOT NULL, INSTANCE_NAME varchar(200) NOT NULL, LAST_CHECKIN_TIME bigint(13) NOT NULL, CHECKIN_INTERVAL bigint(13) NOT NULL, PRIMARY KEY (SCHED_NAME, INSTANCE_NAME) ); -- ---------------------------- -- Records of QRTZ_SCHEDULER_STATE -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPLE_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS; CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, REPEAT_COUNT bigint(7) NOT NULL, REPEAT_INTERVAL bigint(12) NOT NULL, TIMES_TRIGGERED bigint(10) NOT NULL, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), CONSTRAINT QRTZ_SIMPLE_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_SIMPLE_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPROP_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS; CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, STR_PROP_1 varchar(512) DEFAULT 
NULL, STR_PROP_2 varchar(512) DEFAULT NULL, STR_PROP_3 varchar(512) DEFAULT NULL, INT_PROP_1 int(11) DEFAULT NULL, INT_PROP_2 int(11) DEFAULT NULL, LONG_PROP_1 bigint(20) DEFAULT NULL, LONG_PROP_2 bigint(20) DEFAULT NULL, DEC_PROP_1 decimal(13, 4) DEFAULT NULL, DEC_PROP_2 decimal(13, 4) DEFAULT NULL, BOOL_PROP_1 varchar(1) DEFAULT NULL, BOOL_PROP_2 varchar(1) DEFAULT NULL, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), CONSTRAINT QRTZ_SIMPROP_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_SIMPROP_TRIGGERS -- ---------------------------- -- ---------------------------- -- Records of QRTZ_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_access_token -- ---------------------------- DROP TABLE IF EXISTS t_ds_access_token; CREATE TABLE t_ds_access_token ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) DEFAULT NULL, token varchar(64) DEFAULT NULL, expire_time datetime DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_access_token -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alert -- ---------------------------- DROP TABLE IF EXISTS t_ds_alert; CREATE TABLE t_ds_alert ( id int(11) NOT NULL AUTO_INCREMENT, title varchar(64) DEFAULT NULL, content text, alert_status tinyint(4) DEFAULT '0', log text, alertgroup_id int(11) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_alert -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alertgroup -- ---------------------------- DROP TABLE IF EXISTS t_ds_alertgroup; CREATE TABLE t_ds_alertgroup ( id int(11) NOT NULL AUTO_INCREMENT, alert_instance_ids varchar(255) DEFAULT NULL, create_user_id int(11) DEFAULT NULL, group_name varchar(255) DEFAULT NULL, description varchar(255) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY t_ds_alertgroup_name_un (group_name) ); -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_command -- ---------------------------- DROP TABLE IF EXISTS t_ds_command; CREATE TABLE t_ds_command ( id int(11) NOT NULL AUTO_INCREMENT, command_type tinyint(4) DEFAULT NULL, process_definition_id int(11) DEFAULT NULL, command_param text, task_depend_type tinyint(4) DEFAULT NULL, failure_strategy tinyint(4) DEFAULT '0', warning_type tinyint(4) DEFAULT '0', warning_group_id int(11) DEFAULT NULL, schedule_time datetime DEFAULT NULL, start_time datetime DEFAULT NULL, executor_id int(11) DEFAULT NULL, update_time datetime DEFAULT NULL, process_instance_priority int(11) DEFAULT NULL, worker_group varchar(64), environment_code bigint(20) DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_datasource -- ---------------------------- DROP TABLE IF EXISTS t_ds_datasource; CREATE TABLE t_ds_datasource ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(64) NOT NULL, note varchar(255) DEFAULT NULL, type tinyint(4) NOT NULL, user_id int(11) NOT NULL, connection_params text 
NOT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY t_ds_datasource_name_un (name, type) ); -- ---------------------------- -- Records of t_ds_datasource -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_error_command -- ---------------------------- DROP TABLE IF EXISTS t_ds_error_command; CREATE TABLE t_ds_error_command ( id int(11) NOT NULL, command_type tinyint(4) DEFAULT NULL, executor_id int(11) DEFAULT NULL, process_definition_id int(11) DEFAULT NULL, command_param text, task_depend_type tinyint(4) DEFAULT NULL, failure_strategy tinyint(4) DEFAULT '0', warning_type tinyint(4) DEFAULT '0', warning_group_id int(11) DEFAULT NULL, schedule_time datetime DEFAULT NULL, start_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, process_instance_priority int(11) DEFAULT NULL, worker_group varchar(64), environment_code bigint(20) DEFAULT NULL, message text, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_error_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_definition; CREATE TABLE t_ds_process_definition ( id int(11) NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(255) DEFAULT NULL, version int(11) DEFAULT NULL, description text, project_code bigint(20) NOT NULL, release_state tinyint(4) DEFAULT NULL, user_id int(11) DEFAULT NULL, global_params text, flag tinyint(4) DEFAULT NULL, locations text, warning_group_id int(11) DEFAULT NULL, timeout int(11) DEFAULT '0', tenant_id int(11) NOT NULL DEFAULT '-1', create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY process_unique (name,project_code) USING BTREE, UNIQUE KEY code_unique (code) ); -- ---------------------------- -- Records of t_ds_process_definition -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition_log -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_definition_log; CREATE TABLE t_ds_process_definition_log ( id int(11) NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(200) DEFAULT NULL, version int(11) DEFAULT NULL, description text, project_code bigint(20) NOT NULL, release_state tinyint(4) DEFAULT NULL, user_id int(11) DEFAULT NULL, global_params text, flag tinyint(4) DEFAULT NULL, locations text, warning_group_id int(11) DEFAULT NULL, timeout int(11) DEFAULT '0', tenant_id int(11) NOT NULL DEFAULT '-1', operator int(11) DEFAULT NULL, operate_time datetime DEFAULT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_task_definition -- ---------------------------- DROP TABLE IF EXISTS t_ds_task_definition; CREATE TABLE t_ds_task_definition ( id int(11) NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(200) DEFAULT NULL, version int(11) DEFAULT NULL, description text, project_code bigint(20) NOT NULL, user_id int(11) DEFAULT NULL, task_type varchar(50) NOT NULL, task_params longtext, flag tinyint(2) DEFAULT NULL, task_priority tinyint(4) DEFAULT NULL, worker_group varchar(200) DEFAULT NULL, environment_code bigint(20) DEFAULT NULL, fail_retry_times int(11) DEFAULT NULL, fail_retry_interval int(11) DEFAULT NULL, timeout_flag tinyint(2) DEFAULT '0', timeout_notify_strategy tinyint(4) DEFAULT NULL, timeout int(11) DEFAULT 
'0', delay_time int(11) DEFAULT '0', resource_ids varchar(255) DEFAULT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id, code), UNIQUE KEY task_unique (name,project_code) USING BTREE ); -- ---------------------------- -- Table structure for t_ds_task_definition_log -- ---------------------------- DROP TABLE IF EXISTS t_ds_task_definition_log; CREATE TABLE t_ds_task_definition_log ( id int(11) NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(200) DEFAULT NULL, version int(11) DEFAULT NULL, description text, project_code bigint(20) NOT NULL, user_id int(11) DEFAULT NULL, task_type varchar(50) NOT NULL, task_params text, flag tinyint(2) DEFAULT NULL, task_priority tinyint(4) DEFAULT NULL, worker_group varchar(200) DEFAULT NULL, environment_code bigint(20) DEFAULT NULL, fail_retry_times int(11) DEFAULT NULL, fail_retry_interval int(11) DEFAULT NULL, timeout_flag tinyint(2) DEFAULT '0', timeout_notify_strategy tinyint(4) DEFAULT NULL, timeout int(11) DEFAULT '0', delay_time int(11) DEFAULT '0', resource_ids varchar(255) DEFAULT NULL, operator int(11) DEFAULT NULL, operate_time datetime DEFAULT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_process_task_relation -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_task_relation; CREATE TABLE t_ds_process_task_relation ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(200) DEFAULT NULL, process_definition_version int(11) DEFAULT NULL, project_code bigint(20) NOT NULL, process_definition_code bigint(20) NOT NULL, pre_task_code bigint(20) NOT NULL, pre_task_version int(11) NOT NULL, post_task_code bigint(20) NOT NULL, post_task_version int(11) NOT NULL, condition_type tinyint(2) DEFAULT NULL, condition_params text, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_process_task_relation_log -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_task_relation_log; CREATE TABLE t_ds_process_task_relation_log ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(200) DEFAULT NULL, process_definition_version int(11) DEFAULT NULL, project_code bigint(20) NOT NULL, process_definition_code bigint(20) NOT NULL, pre_task_code bigint(20) NOT NULL, pre_task_version int(11) NOT NULL, post_task_code bigint(20) NOT NULL, post_task_version int(11) NOT NULL, condition_type tinyint(2) DEFAULT NULL, condition_params text, operator int(11) DEFAULT NULL, operate_time datetime DEFAULT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_process_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_instance; CREATE TABLE t_ds_process_instance ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(255) DEFAULT NULL, process_definition_version int(11) DEFAULT NULL, process_definition_code bigint(20) not NULL, state tinyint(4) DEFAULT NULL, recovery tinyint(4) DEFAULT NULL, start_time datetime DEFAULT NULL, end_time datetime DEFAULT NULL, run_times int(11) DEFAULT NULL, host varchar(135) DEFAULT NULL, command_type tinyint(4) DEFAULT NULL, command_param text, task_depend_type tinyint(4) DEFAULT NULL, max_try_times tinyint(4) DEFAULT '0', failure_strategy tinyint(4) DEFAULT '0', warning_type tinyint(4) DEFAULT '0', warning_group_id int(11) DEFAULT NULL, schedule_time datetime DEFAULT NULL, 
command_start_time datetime DEFAULT NULL, global_params text, flag tinyint(4) DEFAULT '1', update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, is_sub_process int(11) DEFAULT '0', executor_id int(11) NOT NULL, history_cmd text, process_instance_priority int(11) DEFAULT NULL, worker_group varchar(64) DEFAULT NULL, environment_code bigint(20) DEFAULT NULL, timeout int(11) DEFAULT '0', tenant_id int(11) NOT NULL DEFAULT '-1', var_pool longtext, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_project -- ---------------------------- DROP TABLE IF EXISTS t_ds_project; CREATE TABLE t_ds_project ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(100) DEFAULT NULL, code bigint(20) NOT NULL, description varchar(200) DEFAULT NULL, user_id int(11) DEFAULT NULL, flag tinyint(4) DEFAULT '1', create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_project -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_queue -- ---------------------------- DROP TABLE IF EXISTS t_ds_queue; CREATE TABLE t_ds_queue ( id int(11) NOT NULL AUTO_INCREMENT, queue_name varchar(64) DEFAULT NULL, queue varchar(64) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_queue -- ---------------------------- INSERT INTO t_ds_queue VALUES ('1', 'default', 'default', null, null); -- ---------------------------- -- Table structure for t_ds_relation_datasource_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_datasource_user; CREATE TABLE t_ds_relation_datasource_user ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, datasource_id int(11) DEFAULT NULL, perm int(11) DEFAULT '1', create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_relation_datasource_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_process_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_process_instance; CREATE TABLE t_ds_relation_process_instance ( id int(11) NOT NULL AUTO_INCREMENT, parent_process_instance_id int(11) DEFAULT NULL, parent_task_instance_id int(11) DEFAULT NULL, process_instance_id int(11) DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_relation_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_project_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_project_user; CREATE TABLE t_ds_relation_project_user ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, project_id int(11) DEFAULT NULL, perm int(11) DEFAULT '1', create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_relation_project_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_resources_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_resources_user; CREATE TABLE t_ds_relation_resources_user ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, resources_id int(11) DEFAULT NULL, perm int(11) DEFAULT '1', 
create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_relation_resources_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_udfs_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_udfs_user; CREATE TABLE t_ds_relation_udfs_user ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, udf_id int(11) DEFAULT NULL, perm int(11) DEFAULT '1', create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_resources -- ---------------------------- DROP TABLE IF EXISTS t_ds_resources; CREATE TABLE t_ds_resources ( id int(11) NOT NULL AUTO_INCREMENT, alias varchar(64) DEFAULT NULL, file_name varchar(64) DEFAULT NULL, description varchar(255) DEFAULT NULL, user_id int(11) DEFAULT NULL, type tinyint(4) DEFAULT NULL, size bigint(20) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, pid int(11) DEFAULT NULL, full_name varchar(64) DEFAULT NULL, is_directory tinyint(4) DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY t_ds_resources_un (full_name, type) ); -- ---------------------------- -- Records of t_ds_resources -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_schedules -- ---------------------------- DROP TABLE IF EXISTS t_ds_schedules; CREATE TABLE t_ds_schedules ( id int(11) NOT NULL AUTO_INCREMENT, process_definition_id int(11) NOT NULL, start_time datetime NOT NULL, end_time datetime NOT NULL, timezone_id varchar(40) DEFAULT NULL, crontab varchar(255) NOT NULL, failure_strategy tinyint(4) NOT NULL, user_id int(11) NOT NULL, release_state tinyint(4) NOT NULL, warning_type tinyint(4) NOT NULL, warning_group_id int(11) DEFAULT NULL, process_instance_priority int(11) DEFAULT NULL, worker_group varchar(64) DEFAULT '', environment_code bigint(20) DEFAULT NULL, create_time datetime NOT NULL, update_time datetime NOT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_schedules -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_session -- ---------------------------- DROP TABLE IF EXISTS t_ds_session; CREATE TABLE t_ds_session ( id varchar(64) NOT NULL, user_id int(11) DEFAULT NULL, ip varchar(45) DEFAULT NULL, last_login_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_session -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_task_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_task_instance; CREATE TABLE t_ds_task_instance ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(255) DEFAULT NULL, task_type varchar(50) NOT NULL, task_code bigint(20) NOT NULL, task_definition_version int(11) DEFAULT NULL, process_instance_id int(11) DEFAULT NULL, state tinyint(4) DEFAULT NULL, submit_time datetime DEFAULT NULL, start_time datetime DEFAULT NULL, end_time datetime DEFAULT NULL, host varchar(135) DEFAULT NULL, execute_path varchar(200) DEFAULT NULL, log_path varchar(200) DEFAULT NULL, alert_flag tinyint(4) DEFAULT NULL, retry_times int(4) DEFAULT '0', pid int(4) DEFAULT NULL, app_link text, task_params text, flag tinyint(4) DEFAULT '1', retry_interval int(4) DEFAULT NULL, max_retry_times int(2) DEFAULT NULL, task_instance_priority int(11) DEFAULT NULL, worker_group varchar(64) DEFAULT NULL, environment_code 
bigint(20) DEFAULT NULL, environment_config text DEFAULT '', executor_id int(11) DEFAULT NULL, first_submit_time datetime DEFAULT NULL, delay_time int(4) DEFAULT '0', var_pool longtext, PRIMARY KEY (id), FOREIGN KEY (process_instance_id) REFERENCES t_ds_process_instance (id) ON DELETE CASCADE ); -- ---------------------------- -- Records of t_ds_task_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_tenant -- ---------------------------- DROP TABLE IF EXISTS t_ds_tenant; CREATE TABLE t_ds_tenant ( id int(11) NOT NULL AUTO_INCREMENT, tenant_code varchar(64) DEFAULT NULL, description varchar(255) DEFAULT NULL, queue_id int(11) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_tenant -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_udfs -- ---------------------------- DROP TABLE IF EXISTS t_ds_udfs; CREATE TABLE t_ds_udfs ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, func_name varchar(100) NOT NULL, class_name varchar(255) NOT NULL, type tinyint(4) NOT NULL, arg_types varchar(255) DEFAULT NULL, database varchar(255) DEFAULT NULL, description varchar(255) DEFAULT NULL, resource_id int(11) NOT NULL, resource_name varchar(255) NOT NULL, create_time datetime NOT NULL, update_time datetime NOT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_udfs -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_user; CREATE TABLE t_ds_user ( id int(11) NOT NULL AUTO_INCREMENT, user_name varchar(64) DEFAULT NULL, user_password varchar(64) DEFAULT NULL, user_type tinyint(4) DEFAULT NULL, email varchar(64) DEFAULT NULL, phone varchar(11) DEFAULT NULL, tenant_id int(11) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, queue varchar(64) DEFAULT NULL, state int(1) DEFAULT 1, PRIMARY KEY (id), UNIQUE KEY user_name_unique (user_name) ); -- ---------------------------- -- Records of t_ds_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_worker_group -- ---------------------------- DROP TABLE IF EXISTS t_ds_worker_group; CREATE TABLE t_ds_worker_group ( id bigint(11) NOT NULL AUTO_INCREMENT, name varchar(255) NOT NULL, addr_list text NULL DEFAULT NULL, create_time datetime NULL DEFAULT NULL, update_time datetime NULL DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY name_unique (name) ); -- ---------------------------- -- Records of t_ds_worker_group -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_version -- ---------------------------- DROP TABLE IF EXISTS t_ds_version; CREATE TABLE t_ds_version ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(200) NOT NULL, PRIMARY KEY (id), UNIQUE KEY version_UNIQUE (version) ); -- ---------------------------- -- Records of t_ds_version -- ---------------------------- INSERT INTO t_ds_version VALUES ('1', '1.4.0'); -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- ---------------------------- -- Records of t_ds_user -- 
---------------------------- INSERT INTO t_ds_user VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1); -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- DROP TABLE IF EXISTS t_ds_plugin_define; CREATE TABLE t_ds_plugin_define ( id int NOT NULL AUTO_INCREMENT, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text, create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (id), UNIQUE KEY t_ds_plugin_define_UN (plugin_name,plugin_type) ); -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id int NOT NULL AUTO_INCREMENT, plugin_define_id int NOT NULL, plugin_instance_params text, create_time timestamp NULL DEFAULT CURRENT_TIMESTAMP, update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, instance_name varchar(200) DEFAULT NULL, PRIMARY KEY (id) ); -- -- Table structure for table t_ds_environment -- DROP TABLE IF EXISTS t_ds_environment; CREATE TABLE t_ds_environment ( id int NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(100) DEFAULT NULL, config text DEFAULT NULL, description text, operator int DEFAULT NULL, create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (id), UNIQUE KEY environment_name_unique (name), UNIQUE KEY environment_code_unique (code) ); -- -- Table structure for table t_ds_environment_worker_group_relation -- DROP TABLE IF EXISTS t_ds_environment_worker_group_relation; CREATE TABLE t_ds_environment_worker_group_relation ( id int NOT NULL AUTO_INCREMENT, environment_code bigint(20) NOT NULL, worker_group varchar(255) NOT NULL, operator int DEFAULT NULL, create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (id) , UNIQUE KEY environment_worker_group_unique (environment_code,worker_group) );
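The H2 script above is written in MySQL-flavored SQL (SET FOREIGN_KEY_CHECKS, int(11)/bigint(13) display widths), so it presumably expects an H2 connection opened in MySQL compatibility mode; the JDBC URL flag (e.g. ;MODE=MySQL) is an assumption here, not something shown in this record. A minimal sketch of loading the script with H2's built-in RUNSCRIPT command:

```sql
-- Assumes a connection such as jdbc:h2:./dolphinscheduler;MODE=MySQL so the
-- MySQL-style DDL above (FOREIGN_KEY_CHECKS, display widths) is accepted.
-- RUNSCRIPT executes a .sql file on the current H2 connection.
RUNSCRIPT FROM 'sql/dolphinscheduler_h2.sql';
```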
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,159
[Improve] [SQL] Set the same default value for the field of environment code in MySQL and Postgres
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The environment code field is given different default values in dolphinscheduler_postgre.sql and dolphinscheduler_mysql.sql. In dolphinscheduler_mysql.sql, the default value of the environment code is set to '-1': ![image](https://user-images.githubusercontent.com/4928204/132790181-b45d5e81-df5f-49fc-88fd-cfbff8be8115.png) But in dolphinscheduler_postgre.sql, the default value of the environment code is set to null: ![image](https://user-images.githubusercontent.com/4928204/132790341-16df3422-d696-4ba8-b024-65ee1d5274d9.png) ### What you expected to happen The same default value should be used for the environment code across the different types of database. ### How to reproduce Compare these two files and you will see the difference. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
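The mismatch the screenshots show can also be checked on a live database rather than in the scripts; a hedged sketch (identifier casing differs between the two systems, and these queries are illustrative, not part of the reported issue):

```sql
-- PostgreSQL: column_default comes back NULL for environment_code.
SELECT table_name, column_name, column_default
FROM information_schema.columns
WHERE column_name = 'environment_code';

-- MySQL: the same information_schema query (upper-case view names by
-- convention) returns '-1' for the tables created by the MySQL script.
SELECT TABLE_NAME, COLUMN_NAME, COLUMN_DEFAULT
FROM INFORMATION_SCHEMA.COLUMNS
WHERE COLUMN_NAME = 'environment_code';
```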
https://github.com/apache/dolphinscheduler/issues/6159
https://github.com/apache/dolphinscheduler/pull/6160
58b694a85cf91a193a8768b370739d6cf23022a7
6dedafb8ff2dd413cd82d8d2f981f5774221c533
"2021-09-10T02:42:36Z"
java
"2021-09-11T11:49:32Z"
sql/dolphinscheduler_postgre.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS; DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS; DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE; DROP TABLE IF EXISTS QRTZ_LOCKS; DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS; DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS; DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS; DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS; DROP TABLE IF EXISTS QRTZ_TRIGGERS; DROP TABLE IF EXISTS QRTZ_JOB_DETAILS; DROP TABLE IF EXISTS QRTZ_CALENDARS; CREATE TABLE QRTZ_JOB_DETAILS( SCHED_NAME character varying(120) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, JOB_CLASS_NAME character varying(250) NOT NULL, IS_DURABLE boolean NOT NULL, IS_NONCONCURRENT boolean NOT NULL, IS_UPDATE_DATA boolean NOT NULL, REQUESTS_RECOVERY boolean NOT NULL, JOB_DATA bytea NULL); alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, NEXT_FIRE_TIME BIGINT NULL, PREV_FIRE_TIME BIGINT NULL, PRIORITY INTEGER NULL, TRIGGER_STATE character varying(16) NOT NULL, TRIGGER_TYPE character varying(8) NOT NULL, START_TIME BIGINT NOT NULL, END_TIME BIGINT NULL, CALENDAR_NAME character varying(200) NULL, MISFIRE_INSTR SMALLINT NULL, JOB_DATA bytea NULL) ; alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, REPEAT_COUNT BIGINT NOT NULL, REPEAT_INTERVAL BIGINT NOT NULL, TIMES_TRIGGERED BIGINT NOT NULL) ; alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, CRON_EXPRESSION character varying(120) NOT NULL, TIME_ZONE_ID character varying(80)) ; alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, STR_PROP_1 character varying(512) NULL, STR_PROP_2 character varying(512) NULL, STR_PROP_3 character varying(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, 
BOOL_PROP_1 boolean NULL, BOOL_PROP_2 boolean NULL) ; alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, BLOB_DATA bytea NULL) ; alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME character varying(120) NOT NULL, CALENDAR_NAME character varying(200) NOT NULL, CALENDAR bytea NOT NULL) ; alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME); CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL) ; alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, ENTRY_ID character varying(200) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, FIRED_TIME BIGINT NOT NULL, SCHED_TIME BIGINT NOT NULL, PRIORITY INTEGER NOT NULL, STATE character varying(16) NOT NULL, JOB_NAME character varying(200) NULL, JOB_GROUP character varying(200) NULL, IS_NONCONCURRENT boolean NULL, REQUESTS_RECOVERY boolean NULL) ; alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID); CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME character varying(120) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, LAST_CHECKIN_TIME BIGINT NOT NULL, CHECKIN_INTERVAL BIGINT NOT NULL) ; alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME); CREATE TABLE QRTZ_LOCKS ( SCHED_NAME character varying(120) NOT NULL, LOCK_NAME character varying(40) NOT NULL) ; alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME); CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME); CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME); CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_T_G ON 
QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); -- -- Table structure for table t_ds_access_token -- DROP TABLE IF EXISTS t_ds_access_token; CREATE TABLE t_ds_access_token ( id int NOT NULL , user_id int DEFAULT NULL , token varchar(64) DEFAULT NULL , expire_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alert -- DROP TABLE IF EXISTS t_ds_alert; CREATE TABLE t_ds_alert ( id int NOT NULL , title varchar(64) DEFAULT NULL , content text , alert_status int DEFAULT '0' , log text , alertgroup_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alertgroup -- DROP TABLE IF EXISTS t_ds_alertgroup; CREATE TABLE t_ds_alertgroup( id int NOT NULL, alert_instance_ids varchar (255) DEFAULT NULL, create_user_id int4 DEFAULT NULL, group_name varchar(255) DEFAULT NULL, description varchar(255) DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT t_ds_alertgroup_name_un UNIQUE (group_name) ) ; -- -- Table structure for table t_ds_command -- DROP TABLE IF EXISTS t_ds_command; CREATE TABLE t_ds_command ( id int NOT NULL , command_type int DEFAULT NULL , process_definition_code bigint NOT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , executor_id int DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT NULL, PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_datasource -- DROP TABLE IF EXISTS t_ds_datasource; CREATE TABLE t_ds_datasource ( id int NOT NULL , name varchar(64) NOT NULL , note varchar(255) DEFAULT NULL , type int NOT NULL , user_id int NOT NULL , connection_params text NOT NULL , create_time timestamp NOT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id), CONSTRAINT t_ds_datasource_name_un UNIQUE (name, type) ) ; -- -- Table structure for table t_ds_error_command -- DROP TABLE IF EXISTS t_ds_error_command; CREATE TABLE t_ds_error_command ( id int NOT NULL , command_type int DEFAULT NULL , executor_id int DEFAULT NULL , process_definition_code bigint NOT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT NULL, message text , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_master_server -- -- -- Table structure for table t_ds_process_definition -- DROP TABLE IF EXISTS t_ds_process_definition; CREATE TABLE t_ds_process_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , 
create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT process_definition_unique UNIQUE (name, project_code) ) ; create index process_definition_index on t_ds_process_definition (code,id); DROP TABLE IF EXISTS t_ds_process_definition_log; CREATE TABLE t_ds_process_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_task_definition; CREATE TABLE t_ds_task_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , environment_code bigint DEFAULT NULL, fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , resource_ids varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT task_definition_unique UNIQUE (name, project_code) ) ; create index task_definition_index on t_ds_task_definition (project_code,id); DROP TABLE IF EXISTS t_ds_task_definition_log; CREATE TABLE t_ds_task_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , environment_code bigint DEFAULT NULL, fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , resource_ids varchar(255) DEFAULT NULL , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_process_task_relation; CREATE TABLE t_ds_process_task_relation ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , condition_params text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_process_task_relation_log; CREATE TABLE t_ds_process_task_relation_log ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , pre_task_code bigint DEFAULT NULL , 
pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , condition_params text , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_process_instance -- DROP TABLE IF EXISTS t_ds_process_instance; CREATE TABLE t_ds_process_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , process_definition_code bigint DEFAULT NULL , state int DEFAULT NULL , recovery int DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , run_times int DEFAULT NULL , host varchar(135) DEFAULT NULL , command_type int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , max_try_times int DEFAULT '0' , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , command_start_time timestamp DEFAULT NULL , global_params text , process_instance_json text , flag int DEFAULT '1' , update_time timestamp NULL , is_sub_process int DEFAULT '0' , executor_id int NOT NULL , history_cmd text , dependence_schedule_times text , process_instance_priority int DEFAULT NULL , worker_group varchar(64) , environment_code bigint DEFAULT NULL, timeout int DEFAULT '0' , tenant_id int NOT NULL DEFAULT '-1' , var_pool text , PRIMARY KEY (id) ) ; create index process_instance_index on t_ds_process_instance (process_definition_code,id); create index start_time_index on t_ds_process_instance (start_time); -- -- Table structure for table t_ds_project -- DROP TABLE IF EXISTS t_ds_project; CREATE TABLE t_ds_project ( id int NOT NULL , name varchar(100) DEFAULT NULL , code bigint NOT NULL, description varchar(200) DEFAULT NULL , user_id int DEFAULT NULL , flag int DEFAULT '1' , create_time timestamp DEFAULT CURRENT_TIMESTAMP , update_time timestamp DEFAULT CURRENT_TIMESTAMP , PRIMARY KEY (id) ) ; create index user_id_index on t_ds_project (user_id); -- -- Table structure for table t_ds_queue -- DROP TABLE IF EXISTS t_ds_queue; CREATE TABLE t_ds_queue ( id int NOT NULL , queue_name varchar(64) DEFAULT NULL , queue varchar(64) DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_relation_datasource_user -- DROP TABLE IF EXISTS t_ds_relation_datasource_user; CREATE TABLE t_ds_relation_datasource_user ( id int NOT NULL , user_id int NOT NULL , datasource_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_relation_process_instance -- DROP TABLE IF EXISTS t_ds_relation_process_instance; CREATE TABLE t_ds_relation_process_instance ( id int NOT NULL , parent_process_instance_id int DEFAULT NULL , parent_task_instance_id int DEFAULT NULL , process_instance_id int DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_project_user -- DROP TABLE IF EXISTS t_ds_relation_project_user; CREATE TABLE t_ds_relation_project_user ( id int NOT NULL , user_id int NOT NULL , project_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index relation_project_user_id_index on t_ds_relation_project_user (user_id); -- -- Table 
structure for table t_ds_relation_resources_user -- DROP TABLE IF EXISTS t_ds_relation_resources_user; CREATE TABLE t_ds_relation_resources_user ( id int NOT NULL , user_id int NOT NULL , resources_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_udfs_user -- DROP TABLE IF EXISTS t_ds_relation_udfs_user; CREATE TABLE t_ds_relation_udfs_user ( id int NOT NULL , user_id int NOT NULL , udf_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_resources -- DROP TABLE IF EXISTS t_ds_resources; CREATE TABLE t_ds_resources ( id int NOT NULL , alias varchar(64) DEFAULT NULL , file_name varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , user_id int DEFAULT NULL , type int DEFAULT NULL , size bigint DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , pid int, full_name varchar(64), is_directory int, PRIMARY KEY (id), CONSTRAINT t_ds_resources_un UNIQUE (full_name, type) ) ; -- -- Table structure for table t_ds_schedules -- DROP TABLE IF EXISTS t_ds_schedules; CREATE TABLE t_ds_schedules ( id int NOT NULL , process_definition_code bigint NOT NULL , start_time timestamp NOT NULL , end_time timestamp NOT NULL , timezone_id varchar(40) default NULL , crontab varchar(255) NOT NULL , failure_strategy int NOT NULL , user_id int NOT NULL , release_state int NOT NULL , warning_type int NOT NULL , warning_group_id int DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT NULL, create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_session -- DROP TABLE IF EXISTS t_ds_session; CREATE TABLE t_ds_session ( id varchar(64) NOT NULL , user_id int DEFAULT NULL , ip varchar(45) DEFAULT NULL , last_login_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_task_instance -- DROP TABLE IF EXISTS t_ds_task_instance; CREATE TABLE t_ds_task_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_code bigint NOT NULL, task_definition_version int DEFAULT NULL , process_instance_id int DEFAULT NULL , state int DEFAULT NULL , submit_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , host varchar(135) DEFAULT NULL , execute_path varchar(200) DEFAULT NULL , log_path varchar(200) DEFAULT NULL , alert_flag int DEFAULT NULL , retry_times int DEFAULT '0' , pid int DEFAULT NULL , app_link text , task_params text , flag int DEFAULT '1' , retry_interval int DEFAULT NULL , max_retry_times int DEFAULT NULL , task_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT NULL, environment_config text, executor_id int DEFAULT NULL , first_submit_time timestamp DEFAULT NULL , delay_time int DEFAULT '0' , var_pool text , PRIMARY KEY (id), CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE ) ; -- -- Table structure for table t_ds_tenant -- DROP TABLE IF EXISTS t_ds_tenant; CREATE TABLE t_ds_tenant ( id int NOT NULL , tenant_code varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , queue_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , 
update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_udfs -- DROP TABLE IF EXISTS t_ds_udfs; CREATE TABLE t_ds_udfs ( id int NOT NULL , user_id int NOT NULL , func_name varchar(100) NOT NULL , class_name varchar(255) NOT NULL , type int NOT NULL , arg_types varchar(255) DEFAULT NULL , database varchar(255) DEFAULT NULL , description varchar(255) DEFAULT NULL , resource_id int NOT NULL , resource_name varchar(255) NOT NULL , create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_user -- DROP TABLE IF EXISTS t_ds_user; CREATE TABLE t_ds_user ( id int NOT NULL , user_name varchar(64) DEFAULT NULL , user_password varchar(64) DEFAULT NULL , user_type int DEFAULT NULL , email varchar(64) DEFAULT NULL , phone varchar(11) DEFAULT NULL , tenant_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , queue varchar(64) DEFAULT NULL , state int DEFAULT 1 , PRIMARY KEY (id) ); comment on column t_ds_user.state is 'state 0:disable 1:enable'; -- -- Table structure for table t_ds_version -- DROP TABLE IF EXISTS t_ds_version; CREATE TABLE t_ds_version ( id int NOT NULL , version varchar(200) NOT NULL, PRIMARY KEY (id) ) ; create index version_index on t_ds_version(version); -- -- Table structure for table t_ds_worker_group -- DROP TABLE IF EXISTS t_ds_worker_group; CREATE TABLE t_ds_worker_group ( id bigint NOT NULL , name varchar(255) NOT NULL , addr_list text DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT name_unique UNIQUE (name) ) ; -- -- Table structure for table t_ds_worker_server -- DROP TABLE IF EXISTS t_ds_worker_server; CREATE TABLE t_ds_worker_server ( id int NOT NULL , host varchar(45) DEFAULT NULL , port int DEFAULT NULL , zk_directory varchar(64) DEFAULT NULL , res_info varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , last_heartbeat_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence; CREATE SEQUENCE t_ds_access_token_id_sequence; ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence; CREATE SEQUENCE t_ds_alert_id_sequence; ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence; CREATE SEQUENCE t_ds_alertgroup_id_sequence; ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_command_id_sequence; CREATE SEQUENCE t_ds_command_id_sequence; ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence; CREATE SEQUENCE t_ds_datasource_id_sequence; ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence; CREATE SEQUENCE t_ds_process_definition_id_sequence; ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_log_id_sequence; CREATE SEQUENCE t_ds_process_definition_log_id_sequence; ALTER TABLE t_ds_process_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_id_sequence; CREATE SEQUENCE 
t_ds_task_definition_id_sequence; ALTER TABLE t_ds_task_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_log_id_sequence; CREATE SEQUENCE t_ds_task_definition_log_id_sequence; ALTER TABLE t_ds_task_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_id_sequence; ALTER TABLE t_ds_process_task_relation ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_log_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_log_id_sequence; ALTER TABLE t_ds_process_task_relation_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence; CREATE SEQUENCE t_ds_process_instance_id_sequence; ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_project_id_sequence; CREATE SEQUENCE t_ds_project_id_sequence; ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence; CREATE SEQUENCE t_ds_queue_id_sequence; ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence; CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence; ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence; CREATE SEQUENCE t_ds_relation_process_instance_id_sequence; ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence; CREATE SEQUENCE t_ds_relation_project_user_id_sequence; ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence; CREATE SEQUENCE t_ds_relation_resources_user_id_sequence; ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence; CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence; ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence; CREATE SEQUENCE t_ds_resources_id_sequence; ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence; CREATE SEQUENCE t_ds_schedules_id_sequence; ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence; CREATE SEQUENCE t_ds_task_instance_id_sequence; ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence; CREATE SEQUENCE t_ds_tenant_id_sequence; ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence; CREATE SEQUENCE t_ds_udfs_id_sequence; ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT 
NEXTVAL('t_ds_udfs_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_user_id_sequence; CREATE SEQUENCE t_ds_user_id_sequence; ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_version_id_sequence; CREATE SEQUENCE t_ds_version_id_sequence; ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence; CREATE SEQUENCE t_ds_worker_group_id_sequence; ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence; CREATE SEQUENCE t_ds_worker_server_id_sequence; ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence'); -- Records of t_ds_user, user : admin , password : dolphinscheduler123 INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22'); -- Records of t_ds_alertgroup, default admin warning group INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- Records of t_ds_queue, default queue name : default INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time) VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33'); -- Records of t_ds_version, default version : 1.4.0 INSERT INTO t_ds_version(version) VALUES ('1.4.0'); -- -- Table structure for table t_ds_plugin_define -- DROP TABLE IF EXISTS t_ds_plugin_define; CREATE TABLE t_ds_plugin_define ( id serial NOT NULL, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id), CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type) ); -- -- Table structure for table t_ds_alert_plugin_instance -- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id serial NOT NULL, plugin_define_id int4 NOT NULL, plugin_instance_params text NULL, create_time timestamp NULL, update_time timestamp NULL, instance_name varchar(200) NULL, CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id) ); -- -- Table structure for table t_ds_environment -- DROP TABLE IF EXISTS t_ds_environment; CREATE TABLE t_ds_environment ( id serial NOT NULL, code bigint NOT NULL, name varchar(100) DEFAULT NULL, config text DEFAULT NULL, description text, operator int DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT environment_name_unique UNIQUE (name), CONSTRAINT environment_code_unique UNIQUE (code) ); -- -- Table structure for table t_ds_environment_worker_group_relation -- DROP TABLE IF EXISTS t_ds_environment_worker_group_relation; CREATE TABLE t_ds_environment_worker_group_relation ( id serial NOT NULL, environment_code bigint NOT NULL, worker_group varchar(255) NOT NULL, operator int DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id) , CONSTRAINT environment_worker_group_unique UNIQUE (environment_code,worker_group) );
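The base schema above wires every auto-increment id through a sequence plus a column default. For readers less familiar with this PostgreSQL idiom, here is a minimal, self-contained sketch of the pattern (the `demo` table is illustrative, not part of the schema):

```sql
-- Pattern used throughout the schema: a sequence supplies ids via a column default.
CREATE TABLE demo ( id int NOT NULL, name varchar(64), PRIMARY KEY (id) );
CREATE SEQUENCE demo_id_sequence;
ALTER TABLE demo ALTER COLUMN id SET DEFAULT NEXTVAL('demo_id_sequence');

-- Inserts may now omit id; NEXTVAL assigns 1, 2, ... automatically.
INSERT INTO demo(name) VALUES ('first');   -- id = 1
INSERT INTO demo(name) VALUES ('second');  -- id = 2
```

This is essentially equivalent to declaring the column `serial`, which is why the newer tables at the end of the file (t_ds_plugin_define and friends) simply use `serial` instead.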
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,159
[Improve] [SQL] Set the same default value for the field of environment code in Mysql and Postgres
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The environment code field is given different default values in dolphinscheduler_postgre.sql and dolphinscheduler_mysql.sql. In dolphinscheduler_mysql.sql, the default value of the environment code is set to '-1': ![image](https://user-images.githubusercontent.com/4928204/132790181-b45d5e81-df5f-49fc-88fd-cfbff8be8115.png) But in dolphinscheduler_postgre.sql, the default value of the environment code is set to null: ![image](https://user-images.githubusercontent.com/4928204/132790341-16df3422-d696-4ba8-b024-65ee1d5274d9.png) ### What you expected to happen We'd better use the same default value for the environment code across the different types of databases. ### How to reproduce Compare these two files and you will see the difference. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
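A minimal sketch of the alignment the issue asks for, assuming the MySQL default of '-1' is kept as the canonical value (the concrete statements in the linked PR may differ):

```sql
-- MySQL side (dolphinscheduler_mysql.sql) already declares:
--   environment_code bigint(20) DEFAULT '-1' COMMENT 'environment code'

-- PostgreSQL side (dolphinscheduler_postgre.sql) currently defaults to NULL;
-- setting the same default makes the two schemas agree, e.g. for one table:
ALTER TABLE t_ds_task_instance ALTER COLUMN environment_code SET DEFAULT '-1';
```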
https://github.com/apache/dolphinscheduler/issues/6159
https://github.com/apache/dolphinscheduler/pull/6160
58b694a85cf91a193a8768b370739d6cf23022a7
6dedafb8ff2dd413cd82d8d2f981f5774221c533
"2021-09-10T02:42:36Z"
java
"2021-09-11T11:49:32Z"
sql/upgrade/1.4.0_schema/mysql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY','')); -- uc_dolphin_T_t_ds_user_A_state drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_user_A_state; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_user_A_state() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_user' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='state') THEN ALTER TABLE t_ds_user ADD `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_user_A_state; DROP PROCEDURE uc_dolphin_T_t_ds_user_A_state; -- uc_dolphin_T_t_ds_tenant_A_tenant_name drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name() BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_tenant' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='tenant_name') THEN ALTER TABLE t_ds_tenant DROP `tenant_name`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_tenant_A_tenant_name; DROP PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name; -- uc_dolphin_T_t_ds_task_instance_A_first_submit_time drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='first_submit_time') THEN ALTER TABLE t_ds_task_instance ADD `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time; -- uc_dolphin_T_t_ds_task_instance_A_delay_time drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='delay_time') THEN ALTER TABLE t_ds_task_instance ADD `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_delay_time(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time; -- uc_dolphin_T_t_ds_task_instance_A_var_pool drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_task_instance 
ADD `var_pool` longtext NULL; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_var_pool(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool; -- uc_dolphin_T_t_ds_process_instance_A_var_pool drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_process_instance ADD `var_pool` longtext NULL; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_instance_A_var_pool(); DROP PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool; -- uc_dolphin_T_t_ds_process_definition_A_modify_by drop PROCEDURE if EXISTS ct_dolphin_T_t_ds_process_definition_version; delimiter d// CREATE PROCEDURE ct_dolphin_T_t_ds_process_definition_version() BEGIN CREATE TABLE IF NOT EXISTS `t_ds_process_definition_version` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `process_definition_id` int(11) NOT NULL COMMENT 'process definition id', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `process_definition_json` longtext COMMENT 'process definition json content', `description` text, `global_params` text COMMENT 'global parameters', `locations` text COMMENT 'Node location information', `connects` text COMMENT 'Node connection information', `receivers` text COMMENT 'receivers', `receivers_cc` text COMMENT 'cc', `create_time` datetime DEFAULT NULL COMMENT 'create time', `timeout` int(11) DEFAULT '0' COMMENT 'time out', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids', PRIMARY KEY (`id`), UNIQUE KEY `process_definition_id_and_version` (`process_definition_id`,`version`) USING BTREE, KEY `process_definition_index` (`id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=84 DEFAULT CHARSET=utf8; END; d// delimiter ; CALL ct_dolphin_T_t_ds_process_definition_version; DROP PROCEDURE ct_dolphin_T_t_ds_process_definition_version; -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- DROP TABLE IF EXISTS `t_ds_plugin_define`; CREATE TABLE `t_ds_plugin_define` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email', `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin', `plugin_params` text COMMENT 'plugin params', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`; CREATE TABLE `t_ds_alert_plugin_instance` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_define_id` int NOT NULL, `plugin_instance_params` text COMMENT 'plugin instance params. 
Also contain the params value which user input in web ui.', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- uc_dolphin_T_t_ds_process_definition_A_warning_group_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_definition_A_warning_group_id(); DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id; -- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition_version' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition_version ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id(); DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id; -- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='alert_instance_ids') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids' AFTER `id`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids; -- uc_dolphin_T_t_ds_alertgroup_A_create_user_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='create_user_id') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id' AFTER `alert_instance_ids`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_create_user_id(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id; -- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND INDEX_NAME ='t_ds_alertgroup_name_un') THEN ALTER TABLE t_ds_alertgroup ADD UNIQUE 
KEY `t_ds_alertgroup_name_un` (`group_name`); END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName; -- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS WHERE TABLE_NAME='t_ds_datasource' AND TABLE_SCHEMA=(SELECT DATABASE()) AND INDEX_NAME ='t_ds_datasource_name_un') THEN ALTER TABLE t_ds_datasource ADD UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`); END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName(); DROP PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName; -- uc_dolphin_T_t_ds_schedules_A_add_timezone drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_schedules' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='timezone_id') THEN ALTER TABLE t_ds_schedules ADD COLUMN `timezone_id` varchar(40) default NULL COMMENT 'schedule timezone id' AFTER `end_time`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_schedules_A_add_timezone(); DROP PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone; -- ---------------------------- -- Table structure for t_ds_environment -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment`; CREATE TABLE `t_ds_environment` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `code` bigint(20) DEFAULT NULL COMMENT 'encoding', `name` varchar(100) NOT NULL COMMENT 'environment config name', `config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config', `description` text NULL DEFAULT NULL COMMENT 'the details', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_name_unique` (`name`), UNIQUE KEY `environment_code_unique` (`code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; ALTER TABLE t_ds_task_definition ADD COLUMN `environment_code` bigint(20) default NULL COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_task_definition_log ADD COLUMN `environment_code` bigint(20) default NULL COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_command ADD COLUMN `environment_code` bigint(20) default NULL COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_error_command ADD COLUMN `environment_code` bigint(20) default NULL COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_schedules ADD COLUMN `environment_code` bigint(20) default NULL COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_process_instance ADD COLUMN `environment_code` bigint(20) default NULL COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_task_instance ADD COLUMN `environment_code` bigint(20) default NULL COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_task_instance ADD COLUMN `environment_config` text COMMENT 'environment config' AFTER `environment_code`; -- ---------------------------- -- Table structure for t_ds_environment_worker_group_relation -- ---------------------------- DROP TABLE IF 
EXISTS `t_ds_environment_worker_group_relation`; CREATE TABLE `t_ds_environment_worker_group_relation` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `worker_group` varchar(255) NOT NULL COMMENT 'worker group id', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- These columns will not be used in the new version,if you determine that the historical data is useless, you can delete it using the sql below -- ---------------------------- -- ALTER TABLE t_ds_alert DROP `show_type`, DROP `alert_type`, DROP `receivers`, DROP `receivers_cc`; -- ALTER TABLE t_ds_alertgroup DROP `group_type`; -- ALTER TABLE t_ds_process_definition DROP `receivers`, DROP `receivers_cc`; -- ALTER TABLE t_ds_process_definition_version DROP `receivers`, DROP `receivers_cc`; -- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup; -- ALTER TABLE t_ds_command DROP `dependence`; -- ALTER TABLE t_ds_error_command DROP `dependence`;
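The MySQL upgrade script above repeats one idempotency idiom many times: wrap each DDL change in a throwaway stored procedure that consults information_schema first, so the script can be re-run without erroring on an already-applied change. A stripped-down sketch of that pattern (table and column names are illustrative):

```sql
DROP PROCEDURE IF EXISTS add_col_if_missing;
delimiter d//
CREATE PROCEDURE add_col_if_missing()
BEGIN
    -- Only add the column when it is not present yet, so reruns are harmless.
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME = 't_ds_demo'
                     AND TABLE_SCHEMA = (SELECT DATABASE())
                     AND COLUMN_NAME = 'new_col')
    THEN
        ALTER TABLE t_ds_demo ADD `new_col` int DEFAULT 0 COMMENT 'demo column';
    END IF;
END;
d//
delimiter ;
CALL add_col_if_missing();
DROP PROCEDURE add_col_if_missing;
```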
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,159
[Improve] [SQL] Set the same default value for the field of environment code in Mysql and Postgres
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The environment code field is given different default values in dolphinscheduler_postgre.sql and dolphinscheduler_mysql.sql. In dolphinscheduler_mysql.sql, the default value of the environment code is set to '-1': ![image](https://user-images.githubusercontent.com/4928204/132790181-b45d5e81-df5f-49fc-88fd-cfbff8be8115.png) But in dolphinscheduler_postgre.sql, the default value of the environment code is set to null: ![image](https://user-images.githubusercontent.com/4928204/132790341-16df3422-d696-4ba8-b024-65ee1d5274d9.png) ### What you expected to happen We'd better use the same default value for the environment code across the different types of databases. ### How to reproduce Compare these two files and you will see the difference. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6159
https://github.com/apache/dolphinscheduler/pull/6160
58b694a85cf91a193a8768b370739d6cf23022a7
6dedafb8ff2dd413cd82d8d2f981f5774221c533
"2021-09-10T02:42:36Z"
java
"2021-09-11T11:49:32Z"
sql/upgrade/1.4.0_schema/postgresql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ -- uc_dolphin_T_t_ds_user_A_state delimiter ; DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_user_A_state(); delimiter d// CREATE FUNCTION uc_dolphin_T_t_ds_user_A_state() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_CATALOG=current_database() AND TABLE_SCHEMA=current_schema() AND TABLE_NAME='t_ds_user' AND COLUMN_NAME ='state') THEN ALTER TABLE t_ds_user ADD COLUMN state int DEFAULT 1; comment on column t_ds_user.state is 'state 0:disable 1:enable'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; select uc_dolphin_T_t_ds_user_A_state(); DROP FUNCTION uc_dolphin_T_t_ds_user_A_state(); -- uc_dolphin_T_t_ds_tenant_A_tenant_name delimiter ; DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name(); delimiter d// CREATE FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name() RETURNS void AS $$ BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_CATALOG=current_database() AND TABLE_SCHEMA=current_schema() AND TABLE_NAME='t_ds_tenant' AND COLUMN_NAME ='tenant_name') THEN ALTER TABLE t_ds_tenant DROP COLUMN "tenant_name"; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; select uc_dolphin_T_t_ds_tenant_A_tenant_name(); DROP FUNCTION uc_dolphin_T_t_ds_tenant_A_tenant_name(); -- uc_dolphin_T_t_ds_task_instance_A_first_submit_time delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_first_submit_time() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND COLUMN_NAME ='first_submit_time') THEN ALTER TABLE t_ds_task_instance ADD COLUMN first_submit_time timestamp DEFAULT NULL; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); -- uc_dolphin_T_t_ds_task_instance_A_delay_time delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_delay_time() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND COLUMN_NAME ='delay_time') THEN ALTER TABLE t_ds_task_instance ADD COLUMN delay_time int DEFAULT '0'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_task_instance_A_delay_time(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time(); -- uc_dolphin_T_t_ds_task_instance_A_var_pool delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_task_instance_A_var_pool() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_task_instance ADD COLUMN var_pool text; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT 
uc_dolphin_T_t_ds_task_instance_A_var_pool(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool(); -- uc_dolphin_T_t_ds_process_instance_A_var_pool delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_instance_A_var_pool() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_instance' AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_process_instance ADD COLUMN var_pool text; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_process_instance_A_var_pool(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool(); -- uc_dolphin_T_t_ds_process_definition_A_modify_by delimiter d// CREATE OR REPLACE FUNCTION ct_dolphin_T_t_ds_process_definition_version() RETURNS void AS $$ BEGIN CREATE TABLE IF NOT EXISTS t_ds_process_definition_version ( id int NOT NULL , process_definition_id int NOT NULL , version int DEFAULT NULL , process_definition_json text , description text , global_params text , locations text , connects text , receivers text , receivers_cc text , create_time timestamp DEFAULT NULL , timeout int DEFAULT '0' , resource_ids varchar(64), PRIMARY KEY (id) ) ; create index process_definition_id_and_version on t_ds_process_definition_version (process_definition_id,version); DROP SEQUENCE IF EXISTS t_ds_process_definition_version_id_sequence; CREATE SEQUENCE t_ds_process_definition_version_id_sequence; ALTER TABLE t_ds_process_definition_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_version_id_sequence'); END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT ct_dolphin_T_t_ds_process_definition_version(); DROP FUNCTION IF EXISTS ct_dolphin_T_t_ds_process_definition_version(); -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- DROP TABLE IF EXISTS t_ds_plugin_define; CREATE TABLE t_ds_plugin_define ( id serial NOT NULL, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id), CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type) ); -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id serial NOT NULL, plugin_define_id int4 NOT NULL, plugin_instance_params text NULL, create_time timestamp NULL, update_time timestamp NULL, instance_name varchar(200) NULL, CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id) ); -- uc_dolphin_T_t_ds_process_definition_A_warning_group_id delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_A_warning_group_id() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition' AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition ADD COLUMN warning_group_id int4 DEFAULT NULL; COMMENT ON COLUMN t_ds_process_definition.warning_group_id IS 'alert group id'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_process_definition_A_warning_group_id(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id(); -- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() RETURNS void AS $$ BEGIN IF 
NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition_version' AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition_version ADD COLUMN warning_group_id int4 DEFAULT NULL; COMMENT ON COLUMN t_ds_process_definition_version.warning_group_id IS 'alert group id'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id(); -- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND COLUMN_NAME ='alert_instance_ids') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN alert_instance_ids varchar (255) DEFAULT NULL; COMMENT ON COLUMN t_ds_alertgroup.alert_instance_ids IS 'alert instance ids'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids(); -- uc_dolphin_T_t_ds_alertgroup_A_create_user_id delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_create_user_id() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND COLUMN_NAME ='create_user_id') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN create_user_id int4 DEFAULT NULL; COMMENT ON COLUMN t_ds_alertgroup.create_user_id IS 'create user id'; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_alertgroup_A_create_user_id(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id(); -- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes WHERE relname='t_ds_alertgroup' AND indexrelname ='t_ds_alertgroup_name_un') THEN ALTER TABLE t_ds_alertgroup ADD CONSTRAINT t_ds_alertgroup_name_un UNIQUE (group_name); END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName(); -- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM pg_stat_all_indexes WHERE relname='t_ds_datasource' AND indexrelname ='t_ds_datasource_name_un') THEN ALTER TABLE t_ds_datasource ADD CONSTRAINT t_ds_datasource_name_un UNIQUE (name, type); END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName(); -- uc_dolphin_T_t_ds_schedules_A_add_timezone delimiter d// CREATE OR REPLACE FUNCTION uc_dolphin_T_t_ds_schedules_A_add_timezone() RETURNS void AS $$ BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_schedules' AND COLUMN_NAME ='timezone_id') THEN ALTER TABLE t_ds_schedules ADD COLUMN timezone_id varchar(40) DEFAULT NULL; END IF; END; $$ LANGUAGE plpgsql; d// delimiter ; SELECT uc_dolphin_T_t_ds_schedules_A_add_timezone(); DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone(); -- -- Table structure for table t_ds_environment 
-- DROP TABLE IF EXISTS t_ds_environment; CREATE TABLE t_ds_environment ( id serial NOT NULL , code bigint NOT NULL, name varchar(100) DEFAULT NULL , config text DEFAULT NULL , description text , operator int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT environment_name_unique UNIQUE (name), CONSTRAINT environment_code_unique UNIQUE (code) ); ALTER TABLE t_ds_task_definition ADD COLUMN environment_code bigint DEFAULT NULL; comment on column t_ds_task_definition.environment_code is 'environment code'; ALTER TABLE t_ds_task_definition_log ADD COLUMN environment_code bigint DEFAULT NULL; comment on column t_ds_task_definition_log.environment_code is 'environment code'; ALTER TABLE t_ds_command ADD COLUMN environment_code bigint DEFAULT NULL; comment on column t_ds_command.environment_code is 'environment code'; ALTER TABLE t_ds_error_command ADD COLUMN environment_code bigint DEFAULT NULL; comment on column t_ds_error_command.environment_code is 'environment code'; ALTER TABLE t_ds_schedules ADD COLUMN environment_code bigint DEFAULT NULL; comment on column t_ds_schedules.environment_code is 'environment code'; ALTER TABLE t_ds_process_instance ADD COLUMN environment_code bigint DEFAULT NULL; comment on column t_ds_process_instance.environment_code is 'environment code'; ALTER TABLE t_ds_task_instance ADD COLUMN environment_code bigint DEFAULT NULL; comment on column t_ds_task_instance.environment_code is 'environment code'; ALTER TABLE t_ds_task_instance ADD COLUMN environment_config text; comment on column t_ds_task_instance.environment_config is 'environment config'; -- -- Table structure for table t_ds_environment_worker_group_relation -- DROP TABLE IF EXISTS t_ds_environment_worker_group_relation; CREATE TABLE t_ds_environment_worker_group_relation ( id serial NOT NULL, environment_code bigint NOT NULL, worker_group varchar(255) NOT NULL, operator int DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id) , CONSTRAINT environment_worker_group_unique UNIQUE (environment_code,worker_group) ); -- ---------------------------- -- These columns will not be used in the new version,if you determine that the historical data is useless, you can delete it using the sql below -- ---------------------------- -- ALTER TABLE t_ds_alert DROP COLUMN "show_type", DROP COLUMN "alert_type", DROP COLUMN "receivers", DROP COLUMN "receivers_cc"; -- ALTER TABLE t_ds_alertgroup DROP COLUMN "group_type"; -- ALTER TABLE t_ds_process_definition DROP COLUMN "receivers", DROP COLUMN "receivers_cc"; -- ALTER TABLE t_ds_process_definition_version DROP COLUMN "receivers", DROP COLUMN "receivers_cc"; -- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup; -- ALTER TABLE t_ds_command DROP COLUMN "dependence"; -- ALTER TABLE t_ds_error_command DROP COLUMN "dependence";
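The PostgreSQL upgrade script above uses the parallel idiom: a temporary plpgsql function guards each change behind an information_schema (or pg_stat_all_indexes) lookup and is dropped right after the call. A stripped-down sketch (names are illustrative):

```sql
CREATE OR REPLACE FUNCTION add_col_if_missing() RETURNS void AS $$
BEGIN
    -- Only add the column when it is not present yet, so reruns are harmless.
    IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
                   WHERE TABLE_NAME = 't_ds_demo'
                     AND COLUMN_NAME = 'new_col')
    THEN
        ALTER TABLE t_ds_demo ADD COLUMN new_col int DEFAULT 0;
    END IF;
END;
$$ LANGUAGE plpgsql;

SELECT add_col_if_missing();
DROP FUNCTION add_col_if_missing();
```

Both flavors achieve the same goal: the upgrade can be run against a database that has already been partially upgraded without failing midway.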
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,194
[Bug] [Server] Master build dag error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Running tasks that have multiple definition versions causes the master to raise an error while building the DAG: ![image](https://user-images.githubusercontent.com/42576980/133109069-2b465273-4ebb-4f83-b9b4-ae0f8bce473d.png) ### What you expected to happen The workflow executes normally. ### How to reproduce Run a workflow whose task definitions have more than one version. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6194
https://github.com/apache/dolphinscheduler/pull/6195
7029062f4c2f247e9eac333e28e36e66b03fb435
71e2c8808b3a0871619af4968dfaaa423a90b4c6
"2021-09-13T15:09:21Z"
java
"2021-09-14T03:46:47Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.runner; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES; import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP; import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.DependResult; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.StateEvent; import org.apache.dolphinscheduler.common.enums.StateEventType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.command.HostUpdateCommand; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import 
org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor; import org.apache.dolphinscheduler.server.master.runner.task.TaskAction; import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory; import org.apache.dolphinscheduler.service.alert.ProcessAlertManager; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.quartz.cron.CronUtils; import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.HashBasedTable; import com.google.common.collect.Lists; import com.google.common.collect.Table; /** * master exec thread,split dag */ public class WorkflowExecuteThread implements Runnable { /** * logger of WorkflowExecuteThread */ private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class); /** * runing TaskNode */ private final Map<Integer, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>(); /** * task exec service */ private final ExecutorService taskExecService; /** * process instance */ private ProcessInstance processInstance; /** * submit failure nodes */ private boolean taskFailedSubmit = false; /** * recover node id list */ private List<TaskInstance> recoverNodeIdList = new ArrayList<>(); /** * error task list */ private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>(); /** * complete task list */ private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>(); /** * ready to submit task queue */ private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue(); /** * depend failed task map */ private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>(); /** * forbidden task map */ private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>(); /** * skip task map */ private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>(); /** * recover tolerance fault task list */ private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>(); /** * alert manager */ private ProcessAlertManager processAlertManager; /** * the object of DAG */ private DAG<String, TaskNode, TaskNodeRelation> dag; /** * process service */ private ProcessService processService; /** * master config */ private MasterConfig masterConfig; /** * */ private NettyExecutorManager nettyExecutorManager; private ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>(); private List<Date> complementListDate = Lists.newLinkedList(); private Table<Integer, Long, TaskInstance> taskInstanceHashMap = HashBasedTable.create(); private ProcessDefinition processDefinition; private String key; private ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList; /** * constructor of WorkflowExecuteThread * * @param processInstance processInstance * @param processService processService * @param nettyExecutorManager nettyExecutorManager * 
@param taskTimeoutCheckList */ public WorkflowExecuteThread(ProcessInstance processInstance , ProcessService processService , NettyExecutorManager nettyExecutorManager , ProcessAlertManager processAlertManager , MasterConfig masterConfig , ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList) { this.processService = processService; this.processInstance = processInstance; this.masterConfig = masterConfig; int masterTaskExecNum = masterConfig.getMasterExecTaskNum(); this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread", masterTaskExecNum); this.nettyExecutorManager = nettyExecutorManager; this.processAlertManager = processAlertManager; this.taskTimeoutCheckList = taskTimeoutCheckList; } @Override public void run() { try { startProcess(); handleEvents(); } catch (Exception e) { logger.error("handler error:", e); } } private void handleEvents() { while (this.stateEvents.size() > 0) { try { StateEvent stateEvent = this.stateEvents.peek(); if (stateEventHandler(stateEvent)) { this.stateEvents.remove(stateEvent); } } catch (Exception e) { logger.error("state handle error:", e); } } } public String getKey() { if (StringUtils.isNotEmpty(key) || this.processDefinition == null) { return key; } key = String.format("{}_{}_{}", this.processDefinition.getCode(), this.processDefinition.getVersion(), this.processInstance.getId()); return key; } public boolean addStateEvent(StateEvent stateEvent) { if (processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.info("state event would be abounded :{}", stateEvent.toString()); return false; } this.stateEvents.add(stateEvent); return true; } public int eventSize() { return this.stateEvents.size(); } public ProcessInstance getProcessInstance() { return this.processInstance; } private boolean stateEventHandler(StateEvent stateEvent) { logger.info("process event: {}", stateEvent.toString()); if (!checkStateEvent(stateEvent)) { return false; } boolean result = false; switch (stateEvent.getType()) { case PROCESS_STATE_CHANGE: result = processStateChangeHandler(stateEvent); break; case TASK_STATE_CHANGE: result = taskStateChangeHandler(stateEvent); break; case PROCESS_TIMEOUT: result = processTimeout(); break; case TASK_TIMEOUT: result = taskTimeout(stateEvent); break; default: break; } if (result) { this.stateEvents.remove(stateEvent); } return result; } private boolean taskTimeout(StateEvent stateEvent) { if (taskInstanceHashMap.containsRow(stateEvent.getTaskInstanceId())) { return true; } TaskInstance taskInstance = taskInstanceHashMap .row(stateEvent.getTaskInstanceId()) .values() .iterator().next(); if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) { return true; } TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy(); if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) { ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); taskProcessor.action(TaskAction.TIMEOUT); return false; } else { processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine()); return true; } } private boolean processTimeout() { this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition); return true; } private boolean taskStateChangeHandler(StateEvent stateEvent) { TaskInstance task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); if (stateEvent.getExecutionStatus().typeIsFinished()) { taskFinished(task); } else if 
(activeTaskProcessorMaps.containsKey(stateEvent.getTaskInstanceId())) { ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); iTaskProcessor.run(); if (iTaskProcessor.taskState().typeIsFinished()) { task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); taskFinished(task); } } else { logger.error("state handler error: {}", stateEvent.toString()); } return true; } private void taskFinished(TaskInstance task) { logger.info("work flow {} task {} state:{} ", processInstance.getId(), task.getId(), task.getState()); if (task.taskCanRetry()) { addTaskToStandByList(task); return; } ProcessInstance processInstance = processService.findProcessInstanceById(this.processInstance.getId()); completeTaskList.put(task.getName(), task); activeTaskProcessorMaps.remove(task.getId()); taskTimeoutCheckList.remove(task.getId()); if (task.getState().typeIsSuccess()) { processInstance.setVarPool(task.getVarPool()); processService.saveProcessInstance(processInstance); submitPostNode(task.getName()); } else if (task.getState().typeIsFailure()) { if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) { submitPostNode(task.getName()); } else { errorTaskList.put(task.getName(), task); if (processInstance.getFailureStrategy() == FailureStrategy.END) { killAllTasks(); } } } this.updateProcessInstanceState(); } private boolean checkStateEvent(StateEvent stateEvent) { if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.error("mismatch process instance id: {}, state event:{}", this.processInstance.getId(), stateEvent.toString()); return false; } return true; } private boolean processStateChangeHandler(StateEvent stateEvent) { try { logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus()); processInstance = processService.findProcessInstanceById(this.processInstance.getId()); if (processComplementData()) { return true; } if (stateEvent.getExecutionStatus().typeIsFinished()) { endProcess(); } if (stateEvent.getExecutionStatus() == ExecutionStatus.READY_STOP) { killAllTasks(); } return true; } catch (Exception e) { logger.error("process state change error:", e); } return true; } private boolean processComplementData() throws Exception { if (!needComplementProcess()) { return false; } Date scheduleDate = processInstance.getScheduleTime(); if (scheduleDate == null) { scheduleDate = complementListDate.get(0); } else if (processInstance.getState().typeIsFinished()) { endProcess(); int index = complementListDate.indexOf(scheduleDate); if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) { // complement data ends || no success return false; } scheduleDate = complementListDate.get(index + 1); //the next process complement processInstance.setId(0); } processInstance.setScheduleTime(scheduleDate); Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processInstance.setStartTime(new Date()); 
processInstance.setEndTime(null); processService.saveProcessInstance(processInstance); this.taskInstanceHashMap.clear(); startProcess(); return true; } private boolean needComplementProcess() { if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) { return true; } return false; } private void startProcess() throws Exception { buildFlowDag(); if (this.taskInstanceHashMap.size() == 0) { initTaskQueue(); submitPostNode(null); } } /** * process end handle */ private void endProcess() { this.stateEvents.clear(); processInstance.setEndTime(new Date()); processService.updateProcessInstance(processInstance); if (processInstance.getState().typeIsWaitingThread()) { processService.createRecoveryWaitingThreadCommand(null, processInstance); } List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId()); ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId()); processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser); } /** * generate process dag * * @throws Exception exception */ private void buildFlowDag() throws Exception { if (this.dag != null) { return; } processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam()); List<TaskNode> taskNodeList = processService.genTaskNodeList(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion(), new HashMap<>()); forbiddenTaskList.clear(); taskNodeList.forEach(taskNode -> { if (taskNode.isForbidden()) { forbiddenTaskList.put(taskNode.getName(), taskNode); } }); // generate process to get DAG info List<String> recoveryNameList = getRecoveryNodeNameList(); List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam()); ProcessDag processDag = generateFlowDag(taskNodeList, startNodeNameList, recoveryNameList, processInstance.getTaskDependType()); if (processDag == null) { logger.error("processDag is null"); return; } // generate process dag dag = DagHelper.buildDagGraph(processDag); } /** * init task queue */ private void initTaskQueue() { taskFailedSubmit = false; activeTaskProcessorMaps.clear(); dependFailedTask.clear(); completeTaskList.clear(); errorTaskList.clear(); List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance task : taskInstanceList) { if (task.isTaskComplete()) { completeTaskList.put(task.getName(), task); } if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) { continue; } if (task.getState().typeIsFailure() && !task.taskCanRetry()) { errorTaskList.put(task.getName(), task); } } if (complementListDate.size() == 0 && needComplementProcess()) { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); if (startDate.after(endDate)) { Date tmp = startDate; startDate = endDate; endDate = tmp; } List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode()); complementListDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedules)); logger.info(" process definition code:{} 
complement data: {}", processInstance.getProcessDefinitionCode(), complementListDate.toString()); } } /** * submit task to execute * * @param taskInstance task instance * @return TaskInstance */ private TaskInstance submitTaskExec(TaskInstance taskInstance) { try { ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType()); if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION && taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) { notifyProcessHostUpdate(taskInstance); } boolean submit = taskProcessor.submit(taskInstance, processInstance, masterConfig.getMasterTaskCommitRetryTimes(), masterConfig.getMasterTaskCommitInterval()); if (submit) { this.taskInstanceHashMap.put(taskInstance.getId(), taskInstance.getTaskCode(), taskInstance); activeTaskProcessorMaps.put(taskInstance.getId(), taskProcessor); taskProcessor.run(); addTimeoutCheck(taskInstance); TaskDefinition taskDefinition = processService.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); taskInstance.setTaskDefine(taskDefinition); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); this.stateEvents.add(stateEvent); } return taskInstance; } else { logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!", processInstance.getId(), processInstance.getName(), taskInstance.getId(), taskInstance.getName()); return null; } } catch (Exception e) { logger.error("submit standby task error", e); return null; } } private void notifyProcessHostUpdate(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getHost())) { return; } try { HostUpdateCommand hostUpdateCommand = new HostUpdateCommand(); hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort())); hostUpdateCommand.setTaskInstanceId(taskInstance.getId()); Host host = new Host(taskInstance.getHost()); nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command()); } catch (Exception e) { logger.error("notify process host update", e); } } private void addTimeoutCheck(TaskInstance taskInstance) { TaskDefinition taskDefinition = processService.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion() ); taskInstance.setTaskDefine(taskDefinition); if (TimeoutFlag.OPEN == taskDefinition.getTimeoutFlag()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); return; } if (taskInstance.isDependTask() || taskInstance.isSubProcess()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); } } /** * find task instance in db. * in case submit more than one same name task in the same time. 
* * @param taskCode task code * @param taskVersion task version * @return TaskInstance */ private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) { List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) { return taskInstance; } } return null; } /** * encapsulation task * * @param processInstance process instance * @param taskNode taskNode * @return TaskInstance */ private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) { TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion()); if (taskInstance == null) { taskInstance = new TaskInstance(); taskInstance.setTaskCode(taskNode.getCode()); taskInstance.setTaskDefinitionVersion(taskNode.getVersion()); // task name taskInstance.setName(taskNode.getName()); // task instance state taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); // process instance id taskInstance.setProcessInstanceId(processInstance.getId()); // task instance type taskInstance.setTaskType(taskNode.getType().toUpperCase()); // task instance whether alert taskInstance.setAlertFlag(Flag.NO); // task instance start time taskInstance.setStartTime(null); // task instance flag taskInstance.setFlag(Flag.YES); // task instance retry times taskInstance.setRetryTimes(0); // max task instance retry times taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes()); // retry task instance interval taskInstance.setRetryInterval(taskNode.getRetryInterval()); //set task param taskInstance.setTaskParams(taskNode.getTaskParams()); // task instance priority if (taskNode.getTaskInstancePriority() == null) { taskInstance.setTaskInstancePriority(Priority.MEDIUM); } else { taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority()); } String processWorkerGroup = processInstance.getWorkerGroup(); processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup; String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup(); Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode(); Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? 
processEnvironmentCode : taskNode.getEnvironmentCode(); if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) { taskInstance.setWorkerGroup(processWorkerGroup); taskInstance.setEnvironmentCode(processEnvironmentCode); } else { taskInstance.setWorkerGroup(taskWorkerGroup); taskInstance.setEnvironmentCode(taskEnvironmentCode); } if (!taskInstance.getEnvironmentCode().equals(-1L)) { Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode()); if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) { taskInstance.setEnvironmentConfig(environment.getConfig()); } } // delay execution time taskInstance.setDelayTime(taskNode.getDelayTime()); } return taskInstance; } public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) { Map<String, Property> allProperty = new HashMap<>(); Map<String, TaskInstance> allTaskInstance = new HashMap<>(); if (CollectionUtils.isNotEmpty(preTask)) { for (String preTaskName : preTask) { TaskInstance preTaskInstance = completeTaskList.get(preTaskName); if (preTaskInstance == null) { continue; } String preVarPool = preTaskInstance.getVarPool(); if (StringUtils.isNotEmpty(preVarPool)) { List<Property> properties = JSONUtils.toList(preVarPool, Property.class); for (Property info : properties) { setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info); } } } if (allProperty.size() > 0) { taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values())); } } } private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) { //for this taskInstance all the param in this part is IN. thisProperty.setDirect(Direct.IN); //get the pre taskInstance Property's name String proName = thisProperty.getProp(); //if the Previous nodes have the Property of same name if (allProperty.containsKey(proName)) { //comparison the value of two Property Property otherPro = allProperty.get(proName); //if this property'value of loop is empty,use the other,whether the other's value is empty or not if (StringUtils.isEmpty(thisProperty.getValue())) { allProperty.put(proName, otherPro); //if property'value of loop is not empty,and the other's value is not empty too, use the earlier value } else if (StringUtils.isNotEmpty(otherPro.getValue())) { TaskInstance otherTask = allTaskInstance.get(proName); if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } else { allProperty.put(proName, otherPro); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } private void submitPostNode(String parentNodeName) { Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeName, skipTaskNodeList, dag, completeTaskList); List<TaskInstance> taskInstances = new ArrayList<>(); for (String taskNode : submitTaskNodeList) { TaskNode taskNodeObject = dag.getNode(taskNode); if (taskInstanceHashMap.containsColumn(taskNodeObject.getCode())) { continue; } TaskInstance task = createTaskInstance(processInstance, taskNodeObject); taskInstances.add(task); } // if previous node success , post node submit for (TaskInstance task : taskInstances) { if (readyToSubmitTaskQueue.contains(task)) { continue; } if 
(completeTaskList.containsKey(task.getName())) { logger.info("task {} has already run success", task.getName()); continue; } if (task.getState().typeIsPause() || task.getState().typeIsCancel()) { logger.info("task {} stopped, the state is {}", task.getName(), task.getState()); } else { addTaskToStandByList(task); } } submitStandByTask(); updateProcessInstanceState(); } /** * determine whether the dependencies of the task node are complete * * @return DependResult */ private DependResult isTaskDepsComplete(String taskName) { Collection<String> startNodes = dag.getBeginNode(); // if vertex,returns true directly if (startNodes.contains(taskName)) { return DependResult.SUCCESS; } TaskNode taskNode = dag.getNode(taskName); List<String> depNameList = taskNode.getDepList(); for (String depsNode : depNameList) { if (!dag.containsNode(depsNode) || forbiddenTaskList.containsKey(depsNode) || skipTaskNodeList.containsKey(depsNode)) { continue; } // dependencies must be fully completed if (!completeTaskList.containsKey(depsNode)) { return DependResult.WAITING; } ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState(); if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) { return DependResult.NON_EXEC; } // ignore task state if current task is condition if (taskNode.isConditionsTask()) { continue; } if (!dependTaskSuccess(depsNode, taskName)) { return DependResult.FAILED; } } logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray())); return DependResult.SUCCESS; } /** * depend node is completed, but here need check the condition task branch is the next node */ private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) { if (dag.getNode(dependNodeName).isConditionsTask()) { //condition task need check the branch to run List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, completeTaskList); if (!nextTaskList.contains(nextNodeName)) { return false; } } else { ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState(); if (depTaskState.typeIsFailure()) { return false; } } return true; } /** * query task instance by complete state * * @param state state * @return task instance list */ private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) { List<TaskInstance> resultList = new ArrayList<>(); for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) { if (entry.getValue().getState() == state) { resultList.add(entry.getValue()); } } return resultList; } /** * where there are ongoing tasks * * @param state state * @return ExecutionStatus */ private ExecutionStatus runningState(ExecutionStatus state) { if (state == ExecutionStatus.READY_STOP || state == ExecutionStatus.READY_PAUSE || state == ExecutionStatus.WAITING_THREAD || state == ExecutionStatus.DELAY_EXECUTION) { // if the running task is not completed, the state remains unchanged return state; } else { return ExecutionStatus.RUNNING_EXECUTION; } } /** * exists failure task,contains submit failure、dependency failure,execute failure(retry after) * * @return Boolean whether has failed task */ private boolean hasFailedTask() { if (this.taskFailedSubmit) { return true; } if (this.errorTaskList.size() > 0) { return true; } return this.dependFailedTask.size() > 0; } /** * process instance failure * * @return Boolean whether process instance failed */ private boolean processFailed() { if (hasFailedTask()) { if (processInstance.getFailureStrategy() == 
FailureStrategy.END) { return true; } if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) { return readyToSubmitTaskQueue.size() == 0 || activeTaskProcessorMaps.size() == 0; } } return false; } /** * whether task for waiting thread * * @return Boolean whether has waiting thread task */ private boolean hasWaitingThreadTask() { List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD); return CollectionUtils.isNotEmpty(waitingList); } /** * prepare for pause * 1,failed retry task in the preparation queue , returns to failure directly * 2,exists pause task,complement not completed, pending submission of tasks, return to suspension * 3,success * * @return ExecutionStatus */ private ExecutionStatus processReadyPause() { if (hasRetryTaskInStandBy()) { return ExecutionStatus.FAILURE; } List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE); if (CollectionUtils.isNotEmpty(pauseList) || !isComplementEnd() || readyToSubmitTaskQueue.size() > 0) { return ExecutionStatus.PAUSE; } else { return ExecutionStatus.SUCCESS; } } /** * generate the latest process instance status by the tasks state * * @param instance * @return process instance execution status */ private ExecutionStatus getProcessInstanceState(ProcessInstance instance) { ExecutionStatus state = instance.getState(); if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) { // active task and retry task exists return runningState(state); } // process failure if (processFailed()) { return ExecutionStatus.FAILURE; } // waiting thread if (hasWaitingThreadTask()) { return ExecutionStatus.WAITING_THREAD; } // pause if (state == ExecutionStatus.READY_PAUSE) { return processReadyPause(); } // stop if (state == ExecutionStatus.READY_STOP) { List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP); List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL); if (CollectionUtils.isNotEmpty(stopList) || CollectionUtils.isNotEmpty(killList) || !isComplementEnd()) { return ExecutionStatus.STOP; } else { return ExecutionStatus.SUCCESS; } } // success if (state == ExecutionStatus.RUNNING_EXECUTION) { List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL); if (readyToSubmitTaskQueue.size() > 0) { //tasks currently pending submission, no retries, indicating that depend is waiting to complete return ExecutionStatus.RUNNING_EXECUTION; } else if (CollectionUtils.isNotEmpty(killTasks)) { // tasks maybe killed manually return ExecutionStatus.FAILURE; } else { // if the waiting queue is empty and the status is in progress, then success return ExecutionStatus.SUCCESS; } } return state; } /** * whether complement end * * @return Boolean whether is complement end */ private boolean isComplementEnd() { if (!processInstance.isComplementData()) { return true; } try { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); return processInstance.getScheduleTime().equals(endTime); } catch (Exception e) { logger.error("complement end failed ", e); return false; } } /** * updateProcessInstance process instance state * after each batch of tasks is executed, the status of the process instance is updated */ private void updateProcessInstanceState() { ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId()); ExecutionStatus state = getProcessInstanceState(instance); if (processInstance.getState() != 
state) { logger.info( "work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}", processInstance.getId(), processInstance.getName(), processInstance.getState(), state, processInstance.getCommandType()); instance.setState(state); processService.updateProcessInstance(instance); processInstance = instance; StateEvent stateEvent = new StateEvent(); stateEvent.setExecutionStatus(processInstance.getState()); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE); this.processStateChangeHandler(stateEvent); } } /** * get task dependency result * * @param taskInstance task instance * @return DependResult */ private DependResult getDependResultForTask(TaskInstance taskInstance) { return isTaskDepsComplete(taskInstance.getName()); } /** * add task to standby list * * @param taskInstance task instance */ private void addTaskToStandByList(TaskInstance taskInstance) { logger.info("add task to stand by list: {}", taskInstance.getName()); try { readyToSubmitTaskQueue.put(taskInstance); } catch (Exception e) { logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e); } } /** * remove task from stand by list * * @param taskInstance task instance */ private void removeTaskFromStandbyList(TaskInstance taskInstance) { logger.info("remove task from stand by list, id: {} name:{}", taskInstance.getId(), taskInstance.getName()); try { readyToSubmitTaskQueue.remove(taskInstance); } catch (Exception e) { logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}", taskInstance.getId(), taskInstance.getName(), e); } } /** * has retry task in standby * * @return Boolean whether has retry task in standby */ private boolean hasRetryTaskInStandBy() { for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) { if (iter.next().getState().typeIsFailure()) { return true; } } return false; } /** * close the on going tasks */ private void killAllTasks() { logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(), activeTaskProcessorMaps.size()); for (int taskId : activeTaskProcessorMaps.keySet()) { TaskInstance taskInstance = processService.findTaskInstanceById(taskId); if (taskInstance == null || taskInstance.getState().typeIsFinished()) { continue; } ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskId); taskProcessor.action(TaskAction.STOP); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); this.addStateEvent(stateEvent); } } } public boolean workFlowFinish() { return this.processInstance.getState().typeIsFinished(); } /** * whether the retry interval is timed out * * @param taskInstance task instance * @return Boolean */ private boolean retryTaskIntervalOverTime(TaskInstance taskInstance) { if (taskInstance.getState() != ExecutionStatus.FAILURE) { return true; } if (taskInstance.getId() == 0 || taskInstance.getMaxRetryTimes() == 0 || taskInstance.getRetryInterval() == 0) { return true; } Date now = new Date(); long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime()); // task retry does not over time, return false return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < 
failedTimeInterval; } /** * handling the list of tasks to be submitted */ private void submitStandByTask() { try { int length = readyToSubmitTaskQueue.size(); for (int i = 0; i < length; i++) { TaskInstance task = readyToSubmitTaskQueue.peek(); if (task == null) { continue; } // stop tasks which is retrying if forced success happens if (task.taskCanRetry()) { TaskInstance retryTask = processService.findTaskInstanceById(task.getId()); if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) { task.setState(retryTask.getState()); logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName()); removeTaskFromStandbyList(task); completeTaskList.put(task.getName(), task); submitPostNode(task.getName()); continue; } } //init varPool only this task is the first time running if (task.isFirstRun()) { //get pre task ,get all the task varPool to this task Set<String> preTask = dag.getPreviousNodes(task.getName()); getPreVarPool(task, preTask); } DependResult dependResult = getDependResultForTask(task); if (DependResult.SUCCESS == dependResult) { if (retryTaskIntervalOverTime(task)) { TaskInstance taskInstance = submitTaskExec(task); if (taskInstance == null) { this.taskFailedSubmit = true; } else { removeTaskFromStandbyList(task); } } } else if (DependResult.FAILED == dependResult) { // if the dependency fails, the current node is not submitted and the state changes to failure. dependFailedTask.put(task.getName(), task); removeTaskFromStandbyList(task); logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult); } else if (DependResult.NON_EXEC == dependResult) { // for some reasons(depend task pause/stop) this task would not be submit removeTaskFromStandbyList(task); logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult); } } } catch (Exception e) { logger.error("submit standby task error", e); } } /** * get recovery task instance * * @param taskId task id * @return recovery task instance */ private TaskInstance getRecoveryTaskInstance(String taskId) { if (!StringUtils.isNotEmpty(taskId)) { return null; } try { Integer intId = Integer.valueOf(taskId); TaskInstance task = processService.findTaskInstanceById(intId); if (task == null) { logger.error("start node id cannot be found: {}", taskId); } else { return task; } } catch (Exception e) { logger.error("get recovery task instance failed ", e); } return null; } /** * get start task instance list * * @param cmdParam command param * @return task instance list */ private List<TaskInstance> getStartTaskInstanceList(String cmdParam) { List<TaskInstance> instanceList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) { String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA); for (String nodeId : idList) { TaskInstance task = getRecoveryTaskInstance(nodeId); if (task != null) { instanceList.add(task); } } } return instanceList; } /** * parse "StartNodeNameList" from cmd param * * @param cmdParam command param * @return start node name list */ private List<String> parseStartNodeName(String cmdParam) { List<String> startNodeNameList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap == null) { return startNodeNameList; } if (paramMap.containsKey(CMD_PARAM_START_NODE_NAMES)) { startNodeNameList = 
Arrays.asList(paramMap.get(CMD_PARAM_START_NODE_NAMES).split(Constants.COMMA)); } return startNodeNameList; } /** * generate start node name list from parsing command param; * if "StartNodeIdList" exists in command param, return StartNodeIdList * * @return recovery node name list */ private List<String> getRecoveryNodeNameList() { List<String> recoveryNodeNameList = new ArrayList<>(); if (CollectionUtils.isNotEmpty(recoverNodeIdList)) { for (TaskInstance task : recoverNodeIdList) { recoveryNodeNameList.add(task.getName()); } } return recoveryNodeNameList; } /** * generate flow dag * * @param totalTaskNodeList total task node list * @param startNodeNameList start node name list * @param recoveryNodeNameList recovery node name list * @param depNodeType depend node type * @return ProcessDag process dag * @throws Exception exception */ public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList, List<String> startNodeNameList, List<String> recoveryNodeNameList, TaskDependType depNodeType) throws Exception { return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeNameList, depNodeType); } }
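The executor class above merges upstream task var pools in getPreVarPool/setVarPoolValue with a non-obvious precedence rule: an empty value never overrides a set one, and when two upstream tasks both export a non-empty value for the same property, the task that finished earlier wins. Below is a minimal, self-contained Java sketch of that rule, using hypothetical Prop/FinishedTask stand-ins rather than the real Property/TaskInstance entities:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Minimal sketch of the var-pool merge rule in getPreVarPool/setVarPoolValue.
// Prop and FinishedTask are simplified stand-ins, NOT the real DolphinScheduler classes.
public class VarPoolMergeSketch {

    static final class Prop {
        final String name;
        final String value;
        Prop(String name, String value) { this.name = name; this.value = value; }
    }

    static final class FinishedTask {
        final long endTimeMillis;
        final List<Prop> varPool;
        FinishedTask(long endTimeMillis, List<Prop> varPool) {
            this.endTimeMillis = endTimeMillis;
            this.varPool = varPool;
        }
    }

    static Map<String, Prop> merge(List<FinishedTask> upstream) {
        Map<String, Prop> merged = new HashMap<>();
        Map<String, FinishedTask> owner = new HashMap<>(); // which task produced the stored value
        for (FinishedTask task : upstream) {
            for (Prop p : task.varPool) {
                Prop existing = merged.get(p.name);
                if (existing == null) {
                    merged.put(p.name, p);
                    owner.put(p.name, task);
                } else if (p.value == null || p.value.isEmpty()) {
                    // an empty value never overrides a previously seen one
                } else if (existing.value == null || existing.value.isEmpty()
                        || owner.get(p.name).endTimeMillis > task.endTimeMillis) {
                    // replace when the stored value is empty, or when the current
                    // task finished earlier than the stored value's owner
                    merged.put(p.name, p);
                    owner.put(p.name, task);
                }
            }
        }
        return merged;
    }

    public static void main(String[] args) {
        FinishedTask a = new FinishedTask(1_000L, List.of(new Prop("date", "2021-09-13")));
        FinishedTask b = new FinishedTask(2_000L, List.of(new Prop("date", "2021-09-14")));
        // task a finished first, so its value is kept
        System.out.println(merge(List.of(a, b)).get("date").value); // 2021-09-13
    }
}
```

The earlier-finisher rule makes the merged result depend only on task end times, not on the order in which upstream tasks happen to be inspected.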
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,194
[Bug] [Server] Master build dag error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Running a workflow whose tasks have multiple definition versions causes the Master to fail while building the DAG. ![image](https://user-images.githubusercontent.com/42576980/133109069-2b465273-4ebb-4f83-b9b4-ae0f8bce473d.png) ### What you expected to happen The workflow executes normally. ### How to reproduce Run a workflow that contains multiple versions of the same task definition. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
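The ProcessService.java included below (the file touched by the linked fix) resolves definitions by (code, version) and falls back to the versioned log table when the live row has already moved on to a newer version — see findProcessDefinition. A minimal sketch of that lookup pattern, with hypothetical DefinitionStore/Definition stand-ins for the real MyBatis mappers and entities (an illustration of the pattern, not the actual patch in the linked PR):

```java
// Hypothetical store abstracting the live table and the versioned *_log table.
interface DefinitionStore {
    Definition latestByCode(long code);                  // live table: only the newest version
    Definition byCodeAndVersion(long code, int version); // log table: every historical version
}

record Definition(long code, int version) {}

final class VersionAwareLookup {
    private final DefinitionStore store;

    VersionAwareLookup(DefinitionStore store) {
        this.store = store;
    }

    Definition find(long code, int version) {
        Definition live = store.latestByCode(code);
        if (live != null && live.version() == version) {
            return live; // the live row is still the requested version
        }
        // an older version was requested: resolve it from the log table instead
        return store.byCodeAndVersion(code, version);
    }
}
```

Running a task whose version no longer matches the live definition is exactly the "multi version tasks" scenario reported above; without the log-table fallback, the DAG build has no definition to work from.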
https://github.com/apache/dolphinscheduler/issues/6194
https://github.com/apache/dolphinscheduler/pull/6195
7029062f4c2f247e9eac333e28e36e66b03fb435
71e2c8808b3a0871619af4968dfaaa423a90b4c6
"2021-09-13T15:09:21Z"
java
"2021-09-14T03:46:47Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID; import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS; import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS; import static java.util.stream.Collectors.toSet; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.DateInterval; import org.apache.dolphinscheduler.common.model.PreviousTaskNode; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.process.ResourceInfo; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import 
org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.common.utils.TaskParametersUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ErrorCommand; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper; import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.service.log.LogClientService; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; import 
com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.facebook.presto.jdbc.internal.guava.collect.Lists; import com.fasterxml.jackson.databind.node.ObjectNode; /** * process relative dao that some mappers in this. */ @Component public class ProcessService { private final Logger logger = LoggerFactory.getLogger(getClass()); private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal()}; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionMapper processDefineMapper; @Autowired private ProcessDefinitionLogMapper processDefineLogMapper; @Autowired private ProcessInstanceMapper processInstanceMapper; @Autowired private DataSourceMapper dataSourceMapper; @Autowired private ProcessInstanceMapMapper processInstanceMapMapper; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private CommandMapper commandMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private UdfFuncMapper udfFuncMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ErrorCommandMapper errorCommandMapper; @Autowired private TenantMapper tenantMapper; @Autowired private ProjectMapper projectMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired private EnvironmentMapper environmentMapper; /** * handle Command (construct ProcessInstance from Command) , wrapped in transaction * * @param logger logger * @param host host * @param validThreadNum validThreadNum * @param command found command * @return process instance */ @Transactional(rollbackFor = Exception.class) public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) { ProcessInstance processInstance = constructProcessInstance(command, host); // cannot construct process instance, return null if (processInstance == null) { logger.error("scan command, command parameter is error: {}", command); moveToErrorCommand(command, "process instance is null"); return null; } if (!checkThreadNum(command, validThreadNum)) { logger.info("there is not enough thread for this command: {}", command); return setWaitingThreadProcess(command, processInstance); } processInstance.setCommandType(command.getCommandType()); processInstance.addHistoryCmd(command.getCommandType()); saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); this.commandMapper.deleteById(command.getId()); return processInstance; } /** * save error command, and delete original command * * @param command command * @param message message */ @Transactional(rollbackFor = Exception.class) public void moveToErrorCommand(Command command, String message) { ErrorCommand errorCommand = new ErrorCommand(command, message); this.errorCommandMapper.insert(errorCommand); this.commandMapper.deleteById(command.getId()); } /** * set process waiting thread * * @param command command * @param processInstance processInstance * @return process instance */ private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) { 
processInstance.setState(ExecutionStatus.WAITING_THREAD); if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) { processInstance.addHistoryCmd(command.getCommandType()); } saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); createRecoveryWaitingThreadCommand(command, processInstance); return null; } /** * check thread num * * @param command command * @param validThreadNum validThreadNum * @return if thread is enough */ private boolean checkThreadNum(Command command, int validThreadNum) { int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode()); return validThreadNum >= commandThreadCount; } /** * insert one command * * @param command command * @return create result */ public int createCommand(Command command) { int result = 0; if (command != null) { result = commandMapper.insert(command); } return result; } /** * find one command from queue list * * @return command */ public Command findOneCommand() { return commandMapper.getOneToRun(); } /** * get command page * * @param pageSize * @param pageNumber * @return */ public List<Command> findCommandPage(int pageSize, int pageNumber) { Page<Command> commandPage = new Page<>(pageNumber, pageSize); return commandMapper.queryCommandPage(commandPage).getRecords(); } /** * check the input command exists in queue list * * @param command command * @return create command result */ public boolean verifyIsNeedCreateCommand(Command command) { boolean isNeedCreate = true; EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class); cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1); cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1); cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1); CommandType commandType = command.getCommandType(); if (cmdTypeMap.containsKey(commandType)) { ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam()); int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt(); List<Command> commands = commandMapper.selectList(null); // for all commands for (Command tmpCommand : commands) { if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) { ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam()); if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) { isNeedCreate = false; break; } } } } return isNeedCreate; } /** * find process instance detail by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceDetailById(int processId) { return processInstanceMapper.queryDetailById(processId); } /** * get task node list by definitionId */ public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) { ProcessDefinition processDefinition = processDefineMapper.selectById(defineId); if (processDefinition == null) { logger.error("process define not exists"); return new ArrayList<>(); } List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) { if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); return 
new ArrayList<>(taskDefinitionLogs); } /** * find process instance by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceById(int processId) { return processInstanceMapper.selectById(processId); } /** * find process define by id. * * @param processDefinitionId processDefinitionId * @return process definition */ public ProcessDefinition findProcessDefineById(int processDefinitionId) { return processDefineMapper.selectById(processDefinitionId); } /** * find process define by code and version. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); if (processDefinition == null || processDefinition.getVersion() != version) { processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version); if (processDefinition != null) { processDefinition.setId(0); } } return processDefinition; } /** * find process define by code. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) { return processDefineMapper.queryByCode(processDefinitionCode); } /** * delete work process instance by id * * @param processInstanceId processInstanceId * @return delete process instance result */ public int deleteWorkProcessInstanceById(int processInstanceId) { return processInstanceMapper.deleteById(processInstanceId); } /** * delete all sub process by parent instance id * * @param processInstanceId processInstanceId * @return delete all sub process instance result */ public int deleteAllSubWorkProcessByParentId(int processInstanceId) { List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId); for (Integer subId : subProcessIdList) { deleteAllSubWorkProcessByParentId(subId); deleteWorkProcessMapByParentId(subId); removeTaskLogFile(subId); deleteWorkProcessInstanceById(subId); } return 1; } /** * remove task log file * * @param processInstanceId processInstanceId */ public void removeTaskLogFile(Integer processInstanceId) { List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId); if (CollectionUtils.isEmpty(taskInstanceList)) { return; } try (LogClientService logClient = new LogClientService()) { for (TaskInstance taskInstance : taskInstanceList) { String taskLogPath = taskInstance.getLogPath(); if (StringUtils.isEmpty(taskInstance.getHost())) { continue; } int port = Constants.RPC_PORT; String ip = ""; try { ip = Host.of(taskInstance.getHost()).getIp(); } catch (Exception e) { // compatible old version ip = taskInstance.getHost(); } // remove task log from loggerserver logClient.removeTaskLog(ip, port, taskLogPath); } } } /** * calculate sub process number in the process define. * * @param processDefinitionCode processDefinitionCode * @return process thread num count */ private Integer workProcessThreadNumCount(long processDefinitionCode) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); List<Integer> ids = new ArrayList<>(); recurseFindSubProcessId(processDefinition.getId(), ids); return ids.size() + 1; } /** * recursive query sub process definition id by parent id. 
* * @param parentId parentId * @param ids ids */ public void recurseFindSubProcessId(int parentId, List<Integer> ids) { List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId); if (taskNodeList != null && !taskNodeList.isEmpty()) { for (TaskDefinition taskNode : taskNodeList) { String parameter = taskNode.getTaskParams(); ObjectNode parameterJson = JSONUtils.parseObject(parameter); if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) { SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class); ids.add(subProcessParam.getProcessDefinitionId()); recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids); } } } } /** * create recovery waiting thread command when thread pool is not enough for the process instance. * sub work process instance need not to create recovery command. * create recovery waiting thread command and delete origin command at the same time. * if the recovery command is exists, only update the field update_time * * @param originCommand originCommand * @param processInstance processInstance */ public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) { // sub process doesnot need to create wait command if (processInstance.getIsSubProcess() == Flag.YES) { if (originCommand != null) { commandMapper.deleteById(originCommand.getId()); } return; } Map<String, String> cmdParam = new HashMap<>(); cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId())); // process instance quit by "waiting thread" state if (originCommand == null) { Command command = new Command( CommandType.RECOVER_WAITING_THREAD, processInstance.getTaskDependType(), processInstance.getFailureStrategy(), processInstance.getExecutorId(), processInstance.getProcessDefinition().getCode(), JSONUtils.toJsonString(cmdParam), processInstance.getWarningType(), processInstance.getWarningGroupId(), processInstance.getScheduleTime(), processInstance.getWorkerGroup(), processInstance.getEnvironmentCode(), processInstance.getProcessInstancePriority() ); saveCommand(command); return; } // update the command time if current command if recover from waiting if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) { originCommand.setUpdateTime(new Date()); saveCommand(originCommand); } else { // delete old command and create new waiting thread command commandMapper.deleteById(originCommand.getId()); originCommand.setId(0); originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD); originCommand.setUpdateTime(new Date()); originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam)); originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority()); saveCommand(originCommand); } } /** * get schedule time from command * * @param command command * @param cmdParam cmdParam map * @return date */ private Date getScheduleTime(Command command, Map<String, String> cmdParam) { Date scheduleTime = command.getScheduleTime(); if (scheduleTime == null && cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) { scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); } return scheduleTime; } /** * generate a new work process instance from command. 
* * @param processDefinition processDefinition * @param command command * @param cmdParam cmdParam map * @return process instance */ private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition, Command command, Map<String, String> cmdParam) { ProcessInstance processInstance = new ProcessInstance(processDefinition); processInstance.setProcessDefinitionCode(processDefinition.getCode()); processInstance.setProcessDefinitionVersion(processDefinition.getVersion()); processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setRecovery(Flag.NO); processInstance.setStartTime(new Date()); processInstance.setRunTimes(1); processInstance.setMaxTryTimes(0); //processInstance.setProcessDefinitionId(command.getProcessDefinitionId()); processInstance.setCommandParam(command.getCommandParam()); processInstance.setCommandType(command.getCommandType()); processInstance.setIsSubProcess(Flag.NO); processInstance.setTaskDependType(command.getTaskDependType()); processInstance.setFailureStrategy(command.getFailureStrategy()); processInstance.setExecutorId(command.getExecutorId()); WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType(); processInstance.setWarningType(warningType); Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId(); processInstance.setWarningGroupId(warningGroupId); // schedule time Date scheduleTime = getScheduleTime(command, cmdParam); if (scheduleTime != null) { processInstance.setScheduleTime(scheduleTime); } processInstance.setCommandStartTime(command.getStartTime()); processInstance.setLocations(processDefinition.getLocations()); // reset global params while there are start parameters setGlobalParamIfCommanded(processDefinition, cmdParam); // curing global params processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), getCommandTypeIfComplement(processInstance, command), processInstance.getScheduleTime())); // set process instance priority processInstance.setProcessInstancePriority(command.getProcessInstancePriority()); String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup(); processInstance.setWorkerGroup(workerGroup); processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? 
-1 : command.getEnvironmentCode()); processInstance.setTimeout(processDefinition.getTimeout()); processInstance.setTenantId(processDefinition.getTenantId()); return processInstance; } private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) { // get start params from command param Map<String, String> startParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) { String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS); startParamMap = JSONUtils.toMap(startParamJson); } Map<String, String> fatherParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) { String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS); fatherParamMap = JSONUtils.toMap(fatherParamJson); } startParamMap.putAll(fatherParamMap); // set start param into global params if (startParamMap.size() > 0 && processDefinition.getGlobalParamMap() != null) { for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) { String val = startParamMap.get(param.getKey()); if (val != null) { param.setValue(val); } } } } /** * get process tenant * there is tenant id in definition, use the tenant of the definition. * if there is not tenant id in the definiton or the tenant not exist * use definition creator's tenant. * * @param tenantId tenantId * @param userId userId * @return tenant */ public Tenant getTenantForProcess(int tenantId, int userId) { Tenant tenant = null; if (tenantId >= 0) { tenant = tenantMapper.queryById(tenantId); } if (userId == 0) { return null; } if (tenant == null) { User user = userMapper.selectById(userId); tenant = tenantMapper.queryById(user.getTenantId()); } return tenant; } /** * get an environment * use the code of the environment to find a environment. * * @param environmentCode environmentCode * @return Environment */ public Environment findEnvironmentByCode(Long environmentCode) { Environment environment = null; if (environmentCode >= 0) { environment = environmentMapper.queryByEnvironmentCode(environmentCode); } return environment; } /** * check command parameters is valid * * @param command command * @param cmdParam cmdParam map * @return whether command param is valid */ private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) { if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) { if (cmdParam == null || !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES) || cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) { logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType()); return false; } } return true; } /** * construct process instance according to one command. * * @param command command * @param host host * @return process instance */ private ProcessInstance constructProcessInstance(Command command, String host) { ProcessInstance processInstance; CommandType commandType = command.getCommandType(); Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam()); ProcessDefinition processDefinition = getProcessDefinitionByCommand(command.getProcessDefinitionCode(), cmdParam); if (processDefinition == null) { logger.error("cannot find the work process define! 
define code : {}", command.getProcessDefinitionCode()); return null; } if (cmdParam != null) { int processInstanceId = 0; // recover from failure or pause tasks if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING); processInstanceId = Integer.parseInt(processId); if (processInstanceId == 0) { logger.error("command parameter is error, [ ProcessInstanceId ] is 0"); return null; } } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { // sub process map String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS); processInstanceId = Integer.parseInt(pId); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { // waiting thread command String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD); processInstanceId = Integer.parseInt(pId); } if (processInstanceId == 0) { processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } else { processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return processInstance; } CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command); // reset global params while repeat running is needed by cmdParam if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) { setGlobalParamIfCommanded(processDefinition, cmdParam); } // Recalculate global parameters after rerun. processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), commandTypeIfComplement, processInstance.getScheduleTime())); processInstance.setProcessDefinition(processDefinition); } //reset command parameter if (processInstance.getCommandParam() != null) { Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam()); for (Map.Entry<String, String> entry : processCmdParam.entrySet()) { if (!cmdParam.containsKey(entry.getKey())) { cmdParam.put(entry.getKey(), entry.getValue()); } } } // reset command parameter if sub process if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstance.setCommandParam(command.getCommandParam()); } } else { // generate one new process instance processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) { logger.error("command parameter check failed!"); return null; } if (command.getScheduleTime() != null) { processInstance.setScheduleTime(command.getScheduleTime()); } processInstance.setHost(host); ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION; int runTime = processInstance.getRunTimes(); switch (commandType) { case START_PROCESS: break; case START_FAILURE_TASK_PROCESS: // find failed tasks and init these tasks List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE); List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE); List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); failedList.addAll(killedList); failedList.addAll(toleranceList); for (Integer taskId : failedList) { initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(Constants.COMMA, convertIntListToString(failedList))); 
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case START_CURRENT_TASK_PROCESS: break; case RECOVER_WAITING_THREAD: break; case RECOVER_SUSPENDED_PROCESS: // find pause tasks and init task's state cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE); List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); suspendedNodeList.addAll(stopNodeList); for (Integer taskId : suspendedNodeList) { // initialize the pause state initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList))); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case RECOVER_TOLERANCE_FAULT_PROCESS: // recover tolerance fault process processInstance.setRecovery(Flag.YES); runStatus = processInstance.getState(); break; case COMPLEMENT_DATA: // delete all the valid tasks when complement data List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { taskInstance.setFlag(Flag.NO); this.updateTaskInstance(taskInstance); } initComplementDataParam(processDefinition, processInstance, cmdParam); break; case REPEAT_RUNNING: // delete the recover task names from command parameter if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } // delete all the valid tasks when repeat running List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : validTaskList) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); } processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processInstance.setRunTimes(runTime + 1); initComplementDataParam(processDefinition, processInstance, cmdParam); break; case SCHEDULER: break; default: break; } processInstance.setState(runStatus); return processInstance; } /** * get process definition by command * If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance * Otherwise, get the latest version of ProcessDefinition * * @return ProcessDefinition */ private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) { if (cmdParam != null) { int processInstanceId = 0; if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)); } if (processInstanceId != 0) { ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return null; } return processDefineLogMapper.queryByDefinitionCodeAndVersion( processInstance.getProcessDefinitionCode(), 
processInstance.getProcessDefinitionVersion()); } } return processDefineMapper.queryByCode(processDefinitionCode); } /** * return complement data if the process start with complement data * * @param processInstance processInstance * @param command command * @return command type */ private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) { if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) { return CommandType.COMPLEMENT_DATA; } else { return command.getCommandType(); } } /** * initialize complement data parameters * * @param processDefinition processDefinition * @param processInstance processInstance * @param cmdParam cmdParam */ private void initComplementDataParam(ProcessDefinition processDefinition, ProcessInstance processInstance, Map<String, String> cmdParam) { if (!processInstance.isComplementData()) { return; } Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE), YYYY_MM_DD_HH_MM_SS); if (Flag.NO == processInstance.getIsSubProcess()) { processInstance.setScheduleTime(startComplementTime); } processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); } /** * set sub work process parameters. * handle sub work process instance, update relation table and command parameters * set sub work process flag, extends parent work process command parameters * * @param subProcessInstance subProcessInstance */ public void setSubProcessParam(ProcessInstance subProcessInstance) { String cmdParam = subProcessInstance.getCommandParam(); if (StringUtils.isEmpty(cmdParam)) { return; } Map<String, String> paramMap = JSONUtils.toMap(cmdParam); // write sub process id into cmd param. if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS) && CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) { paramMap.remove(CMD_PARAM_SUB_PROCESS); paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId())); subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap)); subProcessInstance.setIsSubProcess(Flag.YES); this.saveProcessInstance(subProcessInstance); } // copy parent instance user def params to sub process.. String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID); if (StringUtils.isNotEmpty(parentInstanceId)) { ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId)); if (parentInstance != null) { subProcessInstance.setGlobalParams( joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams())); this.saveProcessInstance(subProcessInstance); } else { logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam); } } ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class); if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) { return; } // update sub process id to process map table processInstanceMap.setProcessInstanceId(subProcessInstance.getId()); this.updateWorkProcessInstanceMap(processInstanceMap); } /** * join parent global params into sub process. * only the keys doesn't in sub process global would be joined. 
*
 * @param parentGlobalParams parentGlobalParams
 * @param subGlobalParams subGlobalParams
 * @return joined global params
 */
private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
    List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
    List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
    Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));

    for (Property parent : parentPropertyList) {
        if (!subMap.containsKey(parent.getProp())) {
            subPropertyList.add(parent);
        }
    }
    return JSONUtils.toJsonString(subPropertyList);
}

/**
 * initialize task instance
 *
 * @param taskInstance taskInstance
 */
private void initTaskInstance(TaskInstance taskInstance) {

    if (!taskInstance.isSubProcess()
            && (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
        taskInstance.setFlag(Flag.NO);
        updateTaskInstance(taskInstance);
        return;
    }
    taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
    updateTaskInstance(taskInstance);
}

/**
 * retry submitting the task to the db until it succeeds or the retry times are used up
 *
 * @param taskInstance the task instance to submit
 * @param commitRetryTimes the max retry times
 * @param commitInterval the retry interval, in milliseconds
 * @return the submitted task instance, or null if every attempt failed
 */
public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) {

    int retryTimes = 1;
    boolean submitDB = false;
    TaskInstance task = null;
    while (retryTimes <= commitRetryTimes) {
        try {
            if (!submitDB) {
                // submit task to db
                task = submitTask(taskInstance);
                if (task != null && task.getId() != 0) {
                    submitDB = true;
                    // stop retrying once the task has been committed
                    break;
                }
            }
            if (!submitDB) {
                logger.error("task commit to db failed , taskId {} has already retry {} times, please check the database", taskInstance.getId(), retryTimes);
            }
            Thread.sleep(commitInterval);
        } catch (Exception e) {
            logger.error("task commit to db failed", e);
        }
        retryTimes += 1;
    }
    return task;
}

/**
 * submit task to db
 * submit sub process to command
 *
 * @param taskInstance taskInstance
 * @return task instance
 */
@Transactional(rollbackFor = Exception.class)
public TaskInstance submitTask(TaskInstance taskInstance) {
    ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
    logger.info("start submit task : {}, instance id:{}, state: {}",
            taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
    //submit to db
    TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
    if (task == null) {
        logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
                taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
        return task;
    }
    if (!task.getState().typeIsFinished()) {
        createSubWorkProcess(processInstance, task);
    }
    logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ",
            taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
    return task;
}

/**
 * set work process instance map;
 * repeat running does not generate a new sub process instance;
 * set map {parent instance id, task instance id, 0(child instance id)}
 *
 * @param parentInstance parentInstance
 * @param parentTask parentTask
 * @return process instance map
 */
private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
    ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
    if (processMap != null) {
        return processMap;
    }
    if (parentInstance.getCommandType()
== CommandType.REPEAT_RUNNING) { // update current task id to map processMap = findPreviousTaskProcessMap(parentInstance, parentTask); if (processMap != null) { processMap.setParentTaskInstanceId(parentTask.getId()); updateWorkProcessInstanceMap(processMap); return processMap; } } // new task processMap = new ProcessInstanceMap(); processMap.setParentProcessInstanceId(parentInstance.getId()); processMap.setParentTaskInstanceId(parentTask.getId()); createWorkProcessInstanceMap(processMap); return processMap; } /** * find previous task work process map. * * @param parentProcessInstance parentProcessInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance, TaskInstance parentTask) { Integer preTaskId = 0; List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId()); for (TaskInstance task : preTaskList) { if (task.getName().equals(parentTask.getName())) { preTaskId = task.getId(); ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId); if (map != null) { return map; } } } logger.info("sub process instance is not found,parent task:{},parent instance:{}", parentTask.getId(), parentProcessInstance.getId()); return null; } /** * create sub work process command * * @param parentProcessInstance parentProcessInstance * @param task task */ public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) { if (!task.isSubProcess()) { return; } //check create sub work flow firstly ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId()); if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) { // recover failover tolerance would not create a new command when the sub command already have been created return; } instanceMap = setProcessInstanceMap(parentProcessInstance, task); ProcessInstance childInstance = null; if (instanceMap.getProcessInstanceId() != 0) { childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId()); } Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task); updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode()); initSubInstanceState(childInstance); createCommand(subProcessCommand); logger.info("sub process command created: {} ", subProcessCommand); } /** * complement data needs transform parent parameter to child. 
*/ private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) { // set sub work process command String processMapStr = JSONUtils.toJsonString(instanceMap); Map<String, String> cmdParam = JSONUtils.toMap(processMapStr); if (parentProcessInstance.isComplementData()) { Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam()); String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE); String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime); processMapStr = JSONUtils.toJsonString(cmdParam); } if (fatherParams.size() != 0) { cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams)); processMapStr = JSONUtils.toJsonString(cmdParam); } return processMapStr; } public Map<String, String> getGlobalParamMap(String globalParams) { List<Property> propList; Map<String, String> globalParamMap = new HashMap<>(); if (StringUtils.isNotEmpty(globalParams)) { propList = JSONUtils.toList(globalParams, Property.class); globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); } return globalParamMap; } /** * create sub work process command */ public Command createSubProcessCommand(ProcessInstance parentProcessInstance, ProcessInstance childInstance, ProcessInstanceMap instanceMap, TaskInstance task) { CommandType commandType = getSubCommandType(parentProcessInstance, childInstance); Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams()); int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID)); ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId); Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS); List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams()); Map<String, String> fatherParams = new HashMap<>(); if (CollectionUtils.isNotEmpty(allParam)) { for (Property info : allParam) { fatherParams.put(info.getProp(), globalMap.get(info.getProp())); } } String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams); return new Command( commandType, TaskDependType.TASK_POST, parentProcessInstance.getFailureStrategy(), parentProcessInstance.getExecutorId(), processDefinition.getCode(), processParam, parentProcessInstance.getWarningType(), parentProcessInstance.getWarningGroupId(), parentProcessInstance.getScheduleTime(), task.getWorkerGroup(), task.getEnvironmentCode(), parentProcessInstance.getProcessInstancePriority() ); } /** * initialize sub work flow state * child instance state would be initialized when 'recovery from pause/stop/failure' */ private void initSubInstanceState(ProcessInstance childInstance) { if (childInstance != null) { childInstance.setState(ExecutionStatus.RUNNING_EXECUTION); updateProcessInstance(childInstance); } } /** * get sub work flow command type * child instance exist: child command = fatherCommand * child instance not exists: child command = fatherCommand[0] */ private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) { CommandType commandType = parentProcessInstance.getCommandType(); if (childInstance == null) { String fatherHistoryCommand = 
parentProcessInstance.getHistoryCmd();
        commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
    }
    return commandType;
}

/**
 * update sub process definition
 *
 * @param parentProcessInstance parentProcessInstance
 * @param childDefinitionCode childDefinitionCode
 */
private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) {
    ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(),
            parentProcessInstance.getProcessDefinitionVersion());
    ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode);
    if (childDefinition != null && fatherDefinition != null) {
        childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
        processDefineMapper.updateById(childDefinition);
    }
}

/**
 * submit task to db
 *
 * @param taskInstance taskInstance
 * @param processInstance processInstance
 * @return task instance
 */
public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
    ExecutionStatus processInstanceState = processInstance.getState();

    if (taskInstance.getState().typeIsFailure()) {
        if (taskInstance.isSubProcess()) {
            taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
        } else {
            if (processInstanceState != ExecutionStatus.READY_STOP
                    && processInstanceState != ExecutionStatus.READY_PAUSE) {
                // failure task set invalid
                taskInstance.setFlag(Flag.NO);
                updateTaskInstance(taskInstance);
                // create a new task instance
                if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
                    taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
                }
                taskInstance.setSubmitTime(null);
                taskInstance.setStartTime(null);
                taskInstance.setEndTime(null);
                taskInstance.setFlag(Flag.YES);
                taskInstance.setHost(null);
                taskInstance.setId(0);
            }
        }
    }
    taskInstance.setExecutorId(processInstance.getExecutorId());
    taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
    taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
    if (taskInstance.getSubmitTime() == null) {
        taskInstance.setSubmitTime(new Date());
    }
    if (taskInstance.getFirstSubmitTime() == null) {
        taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
    }
    boolean saveResult = saveTaskInstance(taskInstance);
    if (!saveResult) {
        return null;
    }
    return taskInstance;
}

/**
 * get submit task instance state by the work process state;
 * the task state cannot be modified when it is running/kill/submit success, because the
 * task instance already exists in the task queue;
 * return pause if the work process state is ready pause;
 * return stop if the work process state is ready stop;
 * if none of the above is satisfied, return submit success
 *
 * @param taskInstance taskInstance
 * @param processInstanceState processInstanceState
 * @return the task state to submit
 */
public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
    ExecutionStatus state = taskInstance.getState();
    // running, delayed or killed
    // the task already exists in task queue
    // return state
    if (
            state == ExecutionStatus.RUNNING_EXECUTION
                    || state == ExecutionStatus.DELAY_EXECUTION
                    || state == ExecutionStatus.KILL
    ) {
        return state;
    }
    // return pause / stop if the process instance state is ready pause / stop,
    // otherwise return submit success
    if (processInstanceState == ExecutionStatus.READY_PAUSE) {
        state = ExecutionStatus.PAUSE;
    } else if (processInstanceState == ExecutionStatus.READY_STOP
            || !checkProcessStrategy(taskInstance)) {
        state = ExecutionStatus.KILL;
    } else {
        state = ExecutionStatus.SUBMITTED_SUCCESS;
    }
    return state;
}

/**
 * check process instance strategy
 *
 * @param taskInstance taskInstance
 * @return check strategy result
 */
private boolean checkProcessStrategy(TaskInstance taskInstance) {
    ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
    FailureStrategy failureStrategy = processInstance.getFailureStrategy();
    if (failureStrategy == FailureStrategy.CONTINUE) {
        return true;
    }
    List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());

    for (TaskInstance task : taskInstances) {
        if (task.getState() == ExecutionStatus.FAILURE
                && task.getRetryTimes() >= task.getMaxRetryTimes()) {
            return false;
        }
    }
    return true;
}

/**
 * insert or update work process instance to database
 *
 * @param processInstance processInstance
 */
public void saveProcessInstance(ProcessInstance processInstance) {

    if (processInstance == null) {
        logger.error("save error, process instance is null!");
        return;
    }
    if (processInstance.getId() != 0) {
        processInstanceMapper.updateById(processInstance);
    } else {
        processInstanceMapper.insert(processInstance);
    }
}

/**
 * insert or update command
 *
 * @param command command
 * @return save command result
 */
public int saveCommand(Command command) {
    if (command.getId() != 0) {
        return commandMapper.updateById(command);
    } else {
        return commandMapper.insert(command);
    }
}

/**
 * insert or update task instance
 *
 * @param taskInstance taskInstance
 * @return save task instance result
 */
public boolean saveTaskInstance(TaskInstance taskInstance) {
    if (taskInstance.getId() != 0) {
        return updateTaskInstance(taskInstance);
    } else {
        return createTaskInstance(taskInstance);
    }
}

/**
 * insert task instance
 *
 * @param taskInstance taskInstance
 * @return create task instance result
 */
public boolean createTaskInstance(TaskInstance taskInstance) {
    int count = taskInstanceMapper.insert(taskInstance);
    return count > 0;
}

/**
 * update task instance
 *
 * @param taskInstance taskInstance
 * @return update task instance result
 */
public boolean updateTaskInstance(TaskInstance taskInstance) {
    int count = taskInstanceMapper.updateById(taskInstance);
    return count > 0;
}

/**
 * find task instance by id
 *
 * @param taskId task id
 * @return task instance
 */
public TaskInstance findTaskInstanceById(Integer taskId) {
    return taskInstanceMapper.selectById(taskId);
}

/**
 * package task instance, associate processInstance and processDefine
 *
 * @param taskInstId taskInstId
 * @return task instance
 */
public
TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) { // get task instance TaskInstance taskInstance = findTaskInstanceById(taskInstId); if (taskInstance == null) { return null; } // get process instance ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId()); // get process define ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); taskInstance.setProcessInstance(processInstance); taskInstance.setProcessDefine(processDefine); TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); taskInstance.setTaskDefine(taskDefinition); return taskInstance; } /** * get id list by task state * * @param instanceId instanceId * @param state state * @return task instance states */ public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) { return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal()); } /** * find valid task list by process definition id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES); } /** * find previous task list by work process id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO); } /** * update work process instance map * * @param processInstanceMap processInstanceMap * @return update process instance result */ public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { return processInstanceMapMapper.updateById(processInstanceMap); } /** * create work process instance map * * @param processInstanceMap processInstanceMap * @return create process instance result */ public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { int count = 0; if (processInstanceMap != null) { return processInstanceMapMapper.insert(processInstanceMap); } return count; } /** * find work process map by parent process id and parent task id. 
* * @param parentWorkProcessId parentWorkProcessId * @param parentTaskId parentTaskId * @return process instance map */ public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) { return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId); } /** * delete work process map by parent process id * * @param parentWorkProcessId parentWorkProcessId * @return delete process map result */ public int deleteWorkProcessMapByParentId(int parentWorkProcessId) { return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId); } /** * find sub process instance * * @param parentProcessId parentProcessId * @param parentTaskId parentTaskId * @return process instance */ public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId()); return processInstance; } /** * find parent process instance * * @param subProcessId subProcessId * @return process instance */ public ProcessInstance findParentProcessInstance(Integer subProcessId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); return processInstance; } /** * change task state * * @param state state * @param startTime startTime * @param host host * @param executePath executePath * @param logPath logPath * @param taskInstId taskInstId */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host, String executePath, String logPath, int taskInstId) { taskInstance.setState(state); taskInstance.setStartTime(startTime); taskInstance.setHost(host); taskInstance.setExecutePath(executePath); taskInstance.setLogPath(logPath); saveTaskInstance(taskInstance); } /** * update process instance * * @param processInstance processInstance * @return update process instance result */ public int updateProcessInstance(ProcessInstance processInstance) { return processInstanceMapper.updateById(processInstance); } /** * change task state * * @param state state * @param endTime endTime * @param taskInstId taskInstId * @param varPool varPool */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date endTime, int processId, String appIds, int taskInstId, String varPool) { taskInstance.setPid(processId); taskInstance.setAppLink(appIds); taskInstance.setState(state); taskInstance.setEndTime(endTime); taskInstance.setVarPool(varPool); changeOutParam(taskInstance); saveTaskInstance(taskInstance); } /** * for show in page of taskInstance * * @param taskInstance */ public void changeOutParam(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getVarPool())) { return; } List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class); if (CollectionUtils.isEmpty(properties)) { return; } //if the result more than one line,just get the first . 
Map<String, Object> taskParams = JSONUtils.toMap(taskInstance.getTaskParams(), String.class, Object.class); Object localParams = taskParams.get(LOCAL_PARAMS); if (localParams == null) { return; } List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> outProperty = new HashMap<>(); for (Property info : properties) { if (info.getDirect() == Direct.OUT) { outProperty.put(info.getProp(), info.getValue()); } } for (Property info : allParam) { if (info.getDirect() == Direct.OUT) { String paramName = info.getProp(); info.setValue(outProperty.get(paramName)); } } taskParams.put(LOCAL_PARAMS, allParam); taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams)); } /** * convert integer list to string list * * @param intList intList * @return string list */ public List<String> convertIntListToString(List<Integer> intList) { if (intList == null) { return new ArrayList<>(); } List<String> result = new ArrayList<>(intList.size()); for (Integer intVar : intList) { result.add(String.valueOf(intVar)); } return result; } /** * query schedule by id * * @param id id * @return schedule */ public Schedule querySchedule(int id) { return scheduleMapper.selectById(id); } /** * query Schedule by processDefinitionCode * * @param processDefinitionCode processDefinitionCode * @see Schedule */ public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) { return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode); } /** * query need failover process instance * * @param host host * @return process instance list */ public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) { return processInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * process need failover process instance * * @param processInstance processInstance */ @Transactional(rollbackFor = RuntimeException.class) public void processNeedFailoverProcessInstances(ProcessInstance processInstance) { //1 update processInstance host is null processInstance.setHost(Constants.NULL); processInstanceMapper.updateById(processInstance); ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); //2 insert into recover command Command cmd = new Command(); cmd.setProcessDefinitionCode(processDefinition.getCode()); cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId())); cmd.setExecutorId(processInstance.getExecutorId()); cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS); createCommand(cmd); } /** * query all need failover task instances by host * * @param host host * @return task instance list */ public List<TaskInstance> queryNeedFailoverTaskInstances(String host) { return taskInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * find data source by id * * @param id id * @return datasource */ public DataSource findDataSourceById(int id) { return dataSourceMapper.selectById(id); } /** * update process instance state by id * * @param processInstanceId processInstanceId * @param executionStatus executionStatus * @return update process result */ public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) { ProcessInstance instance = processInstanceMapper.selectById(processInstanceId); instance.setState(executionStatus); return processInstanceMapper.updateById(instance); } /** * find process 
instance by the task id * * @param taskId taskId * @return process instance */ public ProcessInstance findProcessInstanceByTaskId(int taskId) { TaskInstance taskInstance = taskInstanceMapper.selectById(taskId); if (taskInstance != null) { return processInstanceMapper.selectById(taskInstance.getProcessInstanceId()); } return null; } /** * find udf function list by id list string * * @param ids ids * @return udf function list */ public List<UdfFunc> queryUdfFunListByIds(int[] ids) { return udfFuncMapper.queryUdfByIdStr(ids, null); } /** * find tenant code by resource name * * @param resName resource name * @param resourceType resource type * @return tenant code */ public String queryTenantCodeByResName(String resName, ResourceType resourceType) { // in order to query tenant code successful although the version is older String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName); List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { return StringUtils.EMPTY; } int userId = resourceList.get(0).getUserId(); User user = userMapper.selectById(userId); if (Objects.isNull(user)) { return StringUtils.EMPTY; } Tenant tenant = tenantMapper.selectById(user.getTenantId()); if (Objects.isNull(tenant)) { return StringUtils.EMPTY; } return tenant.getTenantCode(); } /** * find schedule list by process define codes. * * @param codes codes * @return schedule list */ public List<Schedule> selectAllByProcessDefineCode(long[] codes) { return scheduleMapper.selectAllByProcessDefineArray(codes); } /** * find last scheduler process instance in the date interval * * @param definitionCode definitionCode * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastSchedulerProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last manual process instance interval * * @param definitionCode process definition code * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastManualProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last running process instance * * @param definitionCode process definition code * @param startTime start time * @param endTime end time * @return process instance */ public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) { return processInstanceMapper.queryLastRunningProcess(definitionCode, startTime, endTime, stateArray); } /** * query user queue by process instance id * * @param processInstanceId processInstanceId * @return queue */ public String queryUserQueueByProcessInstanceId(int processInstanceId) { String queue = ""; ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId); if (processInstance == null) { return queue; } User executor = userMapper.selectById(processInstance.getExecutorId()); if (executor != null) { queue = executor.getQueue(); } return queue; } /** * query project name and user name by processInstanceId. 
* * @param processInstanceId processInstanceId * @return projectName and userName */ public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) { return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId); } /** * get task worker group * * @param taskInstance taskInstance * @return workerGroupId */ public String getTaskWorkerGroup(TaskInstance taskInstance) { String workerGroup = taskInstance.getWorkerGroup(); if (StringUtils.isNotBlank(workerGroup)) { return workerGroup; } int processInstanceId = taskInstance.getProcessInstanceId(); ProcessInstance processInstance = findProcessInstanceById(processInstanceId); if (processInstance != null) { return processInstance.getWorkerGroup(); } logger.info("task : {} will use default worker group", taskInstance.getId()); return Constants.DEFAULT_WORKER_GROUP; } /** * get have perm project list * * @param userId userId * @return project list */ public List<Project> getProjectListHavePerm(int userId) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId); List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId); if (createProjects == null) { createProjects = new ArrayList<>(); } if (authedProjects != null) { createProjects.addAll(authedProjects); } return createProjects; } /** * list unauthorized udf function * * @param userId user id * @param needChecks data source id array * @return unauthorized udf function list */ public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) { List<T> resultList = new ArrayList<>(); if (Objects.nonNull(needChecks) && needChecks.length > 0) { Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks)); switch (authorizationType) { case RESOURCE_FILE_ID: case UDF_FILE: List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks); addAuthorizedResources(ownUdfResources, userId); Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet()); originResSet.removeAll(authorizedResourceFiles); break; case RESOURCE_FILE_NAME: List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks); addAuthorizedResources(ownResources, userId); Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet()); originResSet.removeAll(authorizedResources); break; case DATASOURCE: Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet()); originResSet.removeAll(authorizedDatasources); break; case UDF: Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet()); originResSet.removeAll(authorizedUdfs); break; default: break; } resultList.addAll(originResSet); } return resultList; } /** * get user by user id * * @param userId user id * @return User */ public User getUserById(int userId) { return userMapper.selectById(userId); } /** * get resource by resource id * * @param resourceId resource id * @return Resource */ public Resource getResourceById(int resourceId) { return resourceMapper.selectById(resourceId); } /** * list resources by ids * * @param resIds resIds * @return resource list */ public List<Resource> listResourceByIds(Integer[] resIds) { return resourceMapper.listResourceByIds(resIds); } /** * format task app id in task instance */ public String formatTaskAppId(TaskInstance taskInstance) { 
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId()); if (processInstance == null) { return ""; } ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (definition == null) { return ""; } return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId()); } /** * switch process definition version to process definition log version */ public int processDefinitionToDB(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog, Boolean isFromProcessDefine) { if (null == processDefinition || null == processDefinitionLog) { return Constants.DEFINITION_FAILURE; } processDefinitionLog.setId(processDefinition.getId()); processDefinitionLog.setReleaseState(isFromProcessDefine ? ReleaseState.OFFLINE : ReleaseState.ONLINE); processDefinitionLog.setFlag(Flag.YES); int result; if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { result = processDefineMapper.updateById(processDefinitionLog); } return result; } /** * switch process definition version to process definition log version */ public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) { int switchResult = processDefinitionToDB(processDefinition, processDefinitionLog, true); if (switchResult != Constants.DEFINITION_FAILURE) { switchResult = switchProcessTaskRelationVersion(processDefinition); } return switchResult; } public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode()); } int result = 0; List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); for (ProcessTaskRelationLog processTaskRelationLog : processTaskRelationLogList) { result += processTaskRelationMapper.insert(processTaskRelationLog); } return result; } /** * update task definition */ public int updateTaskDefinition(User operator, Long projectCode, TaskNode taskNode, TaskDefinition taskDefinition) { Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinition.getCode()); Date now = new Date(); taskDefinition.setProjectCode(projectCode); taskDefinition.setUserId(operator.getId()); taskDefinition.setVersion(version == null || version == 0 ? 1 : version + 1); taskDefinition.setUpdateTime(now); setTaskFromTaskNode(taskNode, taskDefinition); int update = taskDefinitionMapper.updateById(taskDefinition); // save task definition log TaskDefinitionLog taskDefinitionLog = new TaskDefinitionLog(taskDefinition); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setOperateTime(now); int insert = taskDefinitionLogMapper.insert(taskDefinitionLog); return insert & update; } private void setTaskFromTaskNode(TaskNode taskNode, TaskDefinition taskDefinition) { taskDefinition.setName(taskNode.getName()); taskDefinition.setDescription(taskNode.getDesc()); taskDefinition.setTaskType(taskNode.getType().toUpperCase()); taskDefinition.setTaskParams(taskNode.getTaskParams()); taskDefinition.setFlag(taskNode.isForbidden() ? 
Flag.NO : Flag.YES); taskDefinition.setTaskPriority(taskNode.getTaskInstancePriority()); taskDefinition.setWorkerGroup(taskNode.getWorkerGroup()); taskDefinition.setEnvironmentCode(Objects.isNull(taskNode.getEnvironmentCode()) ? -1 : taskNode.getEnvironmentCode()); taskDefinition.setFailRetryTimes(taskNode.getMaxRetryTimes()); taskDefinition.setFailRetryInterval(taskNode.getRetryInterval()); taskDefinition.setTimeoutFlag(taskNode.getTaskTimeoutParameter().getEnable() ? TimeoutFlag.OPEN : TimeoutFlag.CLOSE); taskDefinition.setTimeoutNotifyStrategy(taskNode.getTaskTimeoutParameter().getStrategy()); taskDefinition.setTimeout(taskNode.getTaskTimeoutParameter().getInterval()); taskDefinition.setDelayTime(taskNode.getDelayTime()); taskDefinition.setResourceIds(getResourceIds(taskDefinition)); } /** * get resource ids * * @param taskDefinition taskDefinition * @return resource ids */ public String getResourceIds(TaskDefinition taskDefinition) { Set<Integer> resourceIds = null; AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams()); if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) { resourceIds = params.getResourceFilesList(). stream() .filter(t -> t.getId() != 0) .map(ResourceInfo::getId) .collect(Collectors.toSet()); } if (CollectionUtils.isEmpty(resourceIds)) { return StringUtils.EMPTY; } return StringUtils.join(resourceIds, ","); } public boolean saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) { Date now = new Date(); List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>(); List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperateTime(now); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog)); if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) { TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper .queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion()); if (definitionCodeAndVersion != null) { if (!taskDefinitionLog.equals(definitionCodeAndVersion)) { taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId()); Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode()); taskDefinitionLog.setVersion(version + 1); taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime()); updateTaskDefinitionLogs.add(taskDefinitionLog); } continue; } } taskDefinitionLog.setUserId(operator.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); if (taskDefinitionLog.getCode() == 0) { try { taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); return false; } } newTaskDefinitionLogs.add(taskDefinitionLog); } for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) { TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode()); if (task == null) { newTaskDefinitionLogs.add(taskDefinitionToUpdate); } else { int insert = taskDefinitionLogMapper.insert(taskDefinitionToUpdate); taskDefinitionToUpdate.setId(task.getId()); int update = taskDefinitionMapper.updateById(taskDefinitionToUpdate); if ((update & 
insert) != 1) { return false; } } } if (!newTaskDefinitionLogs.isEmpty()) { int insert = taskDefinitionMapper.batchInsert(newTaskDefinitionLogs); int logInsert = taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs); return (logInsert & insert) != 0; } return true; } /** * save processDefinition (including create or update processDefinition) */ public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) { ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1; processDefinitionLog.setVersion(insertVersion); processDefinitionLog.setReleaseState(isFromProcessDefine ? ReleaseState.OFFLINE : ReleaseState.ONLINE); processDefinitionLog.setOperator(operator.getId()); processDefinitionLog.setOperateTime(processDefinition.getUpdateTime()); int insertLog = processDefineLogMapper.insert(processDefinitionLog); int result; if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { processDefinitionLog.setId(processDefinition.getId()); result = processDefineMapper.updateById(processDefinitionLog); } return (insertLog & result) > 0 ? insertVersion : 0; } /** * save task relations */ public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion, List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null; if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) { taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog)); } Date now = new Date(); for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { processTaskRelationLog.setProjectCode(projectCode); processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode); processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion); if (taskDefinitionLogMap != null) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode()); if (taskDefinitionLog != null) { processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion()); } processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion()); } processTaskRelationLog.setCreateTime(now); processTaskRelationLog.setUpdateTime(now); processTaskRelationLog.setOperator(operator.getId()); processTaskRelationLog.setOperateTime(now); } List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet()); Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet()); if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) { return Constants.EXIT_CODE_SUCCESS; } processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode); } int result = processTaskRelationMapper.batchInsert(taskRelationList); int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList); return (result & resultLog) > 0 ? 
Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE; } public boolean isTaskOnline(long taskCode) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode); if (!processTaskRelationList.isEmpty()) { Set<Long> processDefinitionCodes = processTaskRelationList .stream() .map(ProcessTaskRelation::getProcessDefinitionCode) .collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes); // check process definition is already online for (ProcessDefinition processDefinition : processDefinitionList) { if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { return true; } } } return false; } /** * Generate the DAG Graph based on the process definition id * * @param processDefinition process definition * @return dag graph */ public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList()); ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations)); // Generate concrete Dag to be executed return DagHelper.buildDagGraph(processDag); } /** * generate DagData */ public DagData genDagData(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations); List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream() .map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class)) .collect(Collectors.toList()); return new DagData(processDefinition, processTaskRelations, taskDefinitions); } public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) { Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion())); } if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); } @Deprecated public List<TaskNode> genTaskNodeList(Long processCode, int processVersion, Map<String, String> locationMap) { List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processCode, processVersion); Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); Map<Long, TaskNode> taskNodeMap = new HashMap<>(); for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion())); } if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } taskNodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> { if (v == 
null) { v = new TaskNode(); v.setCode(processTaskRelation.getPostTaskCode()); v.setVersion(processTaskRelation.getPostTaskVersion()); List<PreviousTaskNode> preTaskNodeList = new ArrayList<>(); if (processTaskRelation.getPreTaskCode() > 0) { preTaskNodeList.add(new PreviousTaskNode(processTaskRelation.getPreTaskCode(), "", processTaskRelation.getPreTaskVersion())); } v.setPreTaskNodeList(preTaskNodeList); } else { List<PreviousTaskNode> preTaskDefinitionList = v.getPreTaskNodeList(); preTaskDefinitionList.add(new PreviousTaskNode(processTaskRelation.getPreTaskCode(), "", processTaskRelation.getPreTaskVersion())); } return v; }); } List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream().collect(Collectors.toMap(TaskDefinitionLog::getCode, log -> log)); taskNodeMap.forEach((k, v) -> { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(k); v.setId(locationMap.get(taskDefinitionLog.getName())); v.setCode(taskDefinitionLog.getCode()); v.setName(taskDefinitionLog.getName()); v.setDesc(taskDefinitionLog.getDescription()); v.setType(taskDefinitionLog.getTaskType().toUpperCase()); v.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN); v.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes()); v.setRetryInterval(taskDefinitionLog.getFailRetryInterval()); Map<String, Object> taskParamsMap = v.taskParamsToJsonObj(taskDefinitionLog.getTaskParams()); v.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT))); v.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT))); v.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE))); taskParamsMap.remove(Constants.CONDITION_RESULT); taskParamsMap.remove(Constants.DEPENDENCE); v.setParams(JSONUtils.toJsonString(taskParamsMap)); v.setTaskInstancePriority(taskDefinitionLog.getTaskPriority()); v.setWorkerGroup(taskDefinitionLog.getWorkerGroup()); v.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode()); v.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN, taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout()))); v.setDelayTime(taskDefinitionLog.getDelayTime()); v.getPreTaskNodeList().forEach(task -> task.setName(taskDefinitionLogMap.get(task.getCode()).getName())); v.setPreTasks(JSONUtils.toJsonString(v.getPreTaskNodeList().stream().map(PreviousTaskNode::getName).collect(Collectors.toList()))); }); return new ArrayList<>(taskNodeMap.values()); } /** * find task definition by code and version */ public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) { return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion); } /** * add authorized resources * * @param ownResources own resources * @param userId userId */ private void addAuthorizedResources(List<Resource> ownResources, int userId) { List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7); List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? 
resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>(); ownResources.addAll(relationResources); } /** * Use temporarily before refactoring taskNode */ public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, List<Long>> taskCodeMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> { if (v == null) { v = new ArrayList<>(); } if (processTaskRelation.getPreTaskCode() != 0L) { v.add(processTaskRelation.getPreTaskCode()); } return v; }); } if (CollectionUtils.isEmpty(taskDefinitionLogs)) { taskDefinitionLogs = genTaskDefineList(taskRelationList); } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); List<TaskNode> taskNodeList = new ArrayList<>(); for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey()); if (taskDefinitionLog != null) { TaskNode taskNode = new TaskNode(); taskNode.setCode(taskDefinitionLog.getCode()); taskNode.setVersion(taskDefinitionLog.getVersion()); taskNode.setName(taskDefinitionLog.getName()); taskNode.setDesc(taskDefinitionLog.getDescription()); taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase()); taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN); taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes()); taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval()); Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams()); taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT))); taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE))); taskParamsMap.remove(Constants.CONDITION_RESULT); taskParamsMap.remove(Constants.DEPENDENCE); taskNode.setParams(JSONUtils.toJsonString(taskParamsMap)); taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority()); taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup()); taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN, taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout()))); taskNode.setDelayTime(taskDefinitionLog.getDelayTime()); taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getName).collect(Collectors.toList()))); taskNodeList.add(taskNode); } } return taskNodeList; } public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) { HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>(); //find sub tasks ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId); if (processInstanceMap == null) { return processTaskMap; } ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId()); if (fatherProcess != null) { processTaskMap.put(fatherProcess, fatherTask); } return processTaskMap; } }
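The joinGlobalParams and getSubWorkFlowParam methods above define how a parent workflow hands its global parameters down to a sub process: a parent key is copied only when the sub process does not already define it, so values set on the child win. Below is a minimal standalone sketch of that merge rule, using plain java.util maps in place of DolphinScheduler's Property list; GlobalParamJoinSketch and its method names are illustrative, not part of the project's API.

import java.util.LinkedHashMap;
import java.util.Map;

public class GlobalParamJoinSketch {

    // Merge parent global params into the sub process params;
    // values already defined by the sub process take precedence.
    static Map<String, String> join(Map<String, String> parent, Map<String, String> sub) {
        Map<String, String> result = new LinkedHashMap<>(sub);
        // only keys absent from the sub process are taken from the parent
        parent.forEach(result::putIfAbsent);
        return result;
    }

    public static void main(String[] args) {
        Map<String, String> parent = new LinkedHashMap<>();
        parent.put("bizDate", "2021-09-14");
        parent.put("env", "prod");

        Map<String, String> sub = new LinkedHashMap<>();
        sub.put("env", "test"); // defined by the sub process, so it is kept

        // prints {env=test, bizDate=2021-09-14}
        System.out.println(join(parent, sub));
    }
}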
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,208
[Bug] [Master] json parse error in sub process
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Branch: dev. When running a sub process, a JSON parse error occurs in the sub process. ![image](https://user-images.githubusercontent.com/29528966/133248693-467afe92-1310-441d-b929-2bc4b25810b7.png) ### What you expected to happen The sub process should run successfully. ### How to reproduce Run a workflow that contains a sub process. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
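For context, sub process command parameters travel between the master and the child workflow as a JSON string, so any mismatch between the shape that is written and the type that is read back surfaces as a parse error like the one reported. The snippet below is a hypothetical Jackson round-trip that reproduces that general failure mode; it is not the actual root cause fixed in the linked PR, and JsonParamSketch with its field names is invented for illustration.

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

import java.util.Map;

public class JsonParamSketch {

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // Writer side: a nested object slips into the command param map.
        String written = mapper.writeValueAsString(
                Map.of("parentProcessInstanceId", Map.of("id", 42)));
        System.out.println("command param json: " + written);
        // Reader side expects a flat Map<String, String>; the nested object
        // cannot be coerced to a String, so Jackson throws a mapping exception.
        try {
            Map<String, String> read = mapper.readValue(
                    written, new TypeReference<Map<String, String>>() { });
            System.out.println(read);
        } catch (Exception e) {
            System.out.println("json parse error: " + e.getMessage());
        }
    }
}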
https://github.com/apache/dolphinscheduler/issues/6208
https://github.com/apache/dolphinscheduler/pull/6211
ac943bca6a3146ea23bdbfe85a09eac68da73945
8694b9c8f989eb5c6b0792e5901cba2a207cdcf0
"2021-09-14T11:24:11Z"
java
"2021-09-15T02:29:26Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.commons.lang.StringUtils; import java.util.regex.Pattern; /** * Constants */ public final class Constants { private Constants() { throw new UnsupportedOperationException("Construct Constants"); } /** * quartz config */ public static final String ORG_QUARTZ_JOBSTORE_DRIVERDELEGATECLASS = "org.quartz.jobStore.driverDelegateClass"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCENAME = "org.quartz.scheduler.instanceName"; public static final String ORG_QUARTZ_SCHEDULER_INSTANCEID = "org.quartz.scheduler.instanceId"; public static final String ORG_QUARTZ_SCHEDULER_MAKESCHEDULERTHREADDAEMON = "org.quartz.scheduler.makeSchedulerThreadDaemon"; public static final String ORG_QUARTZ_JOBSTORE_USEPROPERTIES = "org.quartz.jobStore.useProperties"; public static final String ORG_QUARTZ_THREADPOOL_CLASS = "org.quartz.threadPool.class"; public static final String ORG_QUARTZ_THREADPOOL_THREADCOUNT = "org.quartz.threadPool.threadCount"; public static final String ORG_QUARTZ_THREADPOOL_MAKETHREADSDAEMONS = "org.quartz.threadPool.makeThreadsDaemons"; public static final String ORG_QUARTZ_THREADPOOL_THREADPRIORITY = "org.quartz.threadPool.threadPriority"; public static final String ORG_QUARTZ_JOBSTORE_CLASS = "org.quartz.jobStore.class"; public static final String ORG_QUARTZ_JOBSTORE_TABLEPREFIX = "org.quartz.jobStore.tablePrefix"; public static final String ORG_QUARTZ_JOBSTORE_ISCLUSTERED = "org.quartz.jobStore.isClustered"; public static final String ORG_QUARTZ_JOBSTORE_MISFIRETHRESHOLD = "org.quartz.jobStore.misfireThreshold"; public static final String ORG_QUARTZ_JOBSTORE_CLUSTERCHECKININTERVAL = "org.quartz.jobStore.clusterCheckinInterval"; public static final String ORG_QUARTZ_JOBSTORE_ACQUIRETRIGGERSWITHINLOCK = "org.quartz.jobStore.acquireTriggersWithinLock"; public static final String ORG_QUARTZ_JOBSTORE_DATASOURCE = "org.quartz.jobStore.dataSource"; public static final String ORG_QUARTZ_DATASOURCE_MYDS_CONNECTIONPROVIDER_CLASS = "org.quartz.dataSource.myDs.connectionProvider.class"; /** * quartz config default value */ public static final String QUARTZ_TABLE_PREFIX = "QRTZ_"; public static final String QUARTZ_MISFIRETHRESHOLD = "60000"; public static final String QUARTZ_CLUSTERCHECKININTERVAL = "5000"; public static final String QUARTZ_DATASOURCE = "myDs"; public static final String QUARTZ_THREADCOUNT = "25"; public static final String QUARTZ_THREADPRIORITY = "5"; public static final String QUARTZ_INSTANCENAME = "DolphinScheduler"; public static final String QUARTZ_INSTANCEID = "AUTO"; public static final String QUARTZ_ACQUIRETRIGGERSWITHINLOCK = 
"true"; /** * common properties path */ public static final String COMMON_PROPERTIES_PATH = "/common.properties"; /** * alter properties */ public static final String ALERT_PLUGIN_BINDING = "alert.plugin.binding"; public static final String ALERT_PLUGIN_DIR = "alert.plugin.dir"; public static final int ALERT_RPC_PORT = 50052; /** * registry properties */ public static final String REGISTRY_DOLPHINSCHEDULER_MASTERS = "/nodes/master"; public static final String REGISTRY_DOLPHINSCHEDULER_WORKERS = "/nodes/worker"; public static final String REGISTRY_DOLPHINSCHEDULER_DEAD_SERVERS = "/dead-servers"; public static final String REGISTRY_DOLPHINSCHEDULER_NODE = "/nodes"; public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_MASTERS = "/lock/masters"; public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_MASTERS = "/lock/failover/masters"; public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_WORKERS = "/lock/failover/workers"; public static final String REGISTRY_DOLPHINSCHEDULER_LOCK_FAILOVER_STARTUP_MASTERS = "/lock/failover/startup-masters"; public static final String REGISTRY_PLUGIN_BINDING = "registry.plugin.binding"; public static final String REGISTRY_PLUGIN_DIR = "registry.plugin.dir"; public static final String REGISTRY_SERVERS = "registry.servers"; /** * fs.defaultFS */ public static final String FS_DEFAULTFS = "fs.defaultFS"; /** * fs s3a endpoint */ public static final String FS_S3A_ENDPOINT = "fs.s3a.endpoint"; /** * fs s3a access key */ public static final String FS_S3A_ACCESS_KEY = "fs.s3a.access.key"; /** * fs s3a secret key */ public static final String FS_S3A_SECRET_KEY = "fs.s3a.secret.key"; /** * hadoop configuration */ public static final String HADOOP_RM_STATE_ACTIVE = "ACTIVE"; public static final String HADOOP_RM_STATE_STANDBY = "STANDBY"; public static final String HADOOP_RESOURCE_MANAGER_HTTPADDRESS_PORT = "resource.manager.httpaddress.port"; /** * yarn.resourcemanager.ha.rm.ids */ public static final String YARN_RESOURCEMANAGER_HA_RM_IDS = "yarn.resourcemanager.ha.rm.ids"; /** * yarn.application.status.address */ public static final String YARN_APPLICATION_STATUS_ADDRESS = "yarn.application.status.address"; /** * yarn.job.history.status.address */ public static final String YARN_JOB_HISTORY_STATUS_ADDRESS = "yarn.job.history.status.address"; /** * hdfs configuration * hdfs.root.user */ public static final String HDFS_ROOT_USER = "hdfs.root.user"; /** * hdfs/s3 configuration * resource.upload.path */ public static final String RESOURCE_UPLOAD_PATH = "resource.upload.path"; /** * data basedir path */ public static final String DATA_BASEDIR_PATH = "data.basedir.path"; /** * dolphinscheduler.env.path */ public static final String DOLPHINSCHEDULER_ENV_PATH = "dolphinscheduler.env.path"; /** * environment properties default path */ public static final String ENV_PATH = "env/dolphinscheduler_env.sh"; /** * python home */ public static final String PYTHON_HOME = "PYTHON_HOME"; /** * resource.view.suffixs */ public static final String RESOURCE_VIEW_SUFFIXS = "resource.view.suffixs"; public static final String RESOURCE_VIEW_SUFFIXS_DEFAULT_VALUE = "txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js"; /** * development.state */ public static final String DEVELOPMENT_STATE = "development.state"; /** * sudo enable */ public static final String SUDO_ENABLE = "sudo.enable"; /** * string true */ public static final String STRING_TRUE = "true"; /** * string false */ public static final String STRING_FALSE = "false"; /** * resource 
storage type */ public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type"; /** * comma , */ public static final String COMMA = ","; /** * COLON : */ public static final String COLON = ":"; /** * SPACE " " */ public static final String SPACE = " "; /** * SINGLE_SLASH / */ public static final String SINGLE_SLASH = "/"; /** * DOUBLE_SLASH // */ public static final String DOUBLE_SLASH = "//"; /** * SINGLE_QUOTES "'" */ public static final String SINGLE_QUOTES = "'"; /** * DOUBLE_QUOTES "\"" */ public static final String DOUBLE_QUOTES = "\""; /** * SEMICOLON ; */ public static final String SEMICOLON = ";"; /** * EQUAL SIGN */ public static final String EQUAL_SIGN = "="; /** * AT SIGN */ public static final String AT_SIGN = "@"; /** * date format of yyyy-MM-dd HH:mm:ss */ public static final String YYYY_MM_DD_HH_MM_SS = "yyyy-MM-dd HH:mm:ss"; /** * date format of yyyyMMddHHmmss */ public static final String YYYYMMDDHHMMSS = "yyyyMMddHHmmss"; /** * date format of yyyyMMddHHmmssSSS */ public static final String YYYYMMDDHHMMSSSSS = "yyyyMMddHHmmssSSS"; /** * http connect time out */ public static final int HTTP_CONNECT_TIMEOUT = 60 * 1000; /** * http connect request time out */ public static final int HTTP_CONNECTION_REQUEST_TIMEOUT = 60 * 1000; /** * httpclient soceket time out */ public static final int SOCKET_TIMEOUT = 60 * 1000; /** * http header */ public static final String HTTP_HEADER_UNKNOWN = "unKnown"; /** * http X-Forwarded-For */ public static final String HTTP_X_FORWARDED_FOR = "X-Forwarded-For"; /** * http X-Real-IP */ public static final String HTTP_X_REAL_IP = "X-Real-IP"; /** * UTF-8 */ public static final String UTF_8 = "UTF-8"; /** * user name regex */ public static final Pattern REGEX_USER_NAME = Pattern.compile("^[a-zA-Z0-9._-]{3,39}$"); /** * email regex */ public static final Pattern REGEX_MAIL_NAME = Pattern.compile("^([a-z0-9A-Z]+[_|\\-|\\.]?)+[a-z0-9A-Z]@([a-z0-9A-Z]+(-[a-z0-9A-Z]+)?\\.)+[a-zA-Z]{2,}$"); /** * default display rows */ public static final int DEFAULT_DISPLAY_ROWS = 10; /** * read permission */ public static final int READ_PERMISSION = 2 * 1; /** * write permission */ public static final int WRITE_PERMISSION = 2 * 2; /** * execute permission */ public static final int EXECUTE_PERMISSION = 1; /** * default admin permission */ public static final int DEFAULT_ADMIN_PERMISSION = 7; /** * all permissions */ public static final int ALL_PERMISSIONS = READ_PERMISSION | WRITE_PERMISSION | EXECUTE_PERMISSION; /** * max task timeout */ public static final int MAX_TASK_TIMEOUT = 24 * 3600; /** * master cpu load */ public static final int DEFAULT_MASTER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2; /** * worker cpu load */ public static final int DEFAULT_WORKER_CPU_LOAD = Runtime.getRuntime().availableProcessors() * 2; /** * worker host weight */ public static final int DEFAULT_WORKER_HOST_WEIGHT = 100; /** * default log cache rows num,output when reach the number */ public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16; /** * log flush interval?output when reach the interval */ public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000; /** * time unit secong to minutes */ public static final int SEC_2_MINUTES_TIME_UNIT = 60; /*** * * rpc port */ public static final int RPC_PORT = 50051; /** * forbid running task */ public static final String FLOWNODE_RUN_FLAG_FORBIDDEN = "FORBIDDEN"; /** * normal running task */ public static final String FLOWNODE_RUN_FLAG_NORMAL = "NORMAL"; /** * datasource configuration path */ public static final String 
DATASOURCE_PROPERTIES = "/datasource.properties"; public static final String COMMON_TASK_TYPE = "common"; public static final String DEFAULT = "default"; public static final String USER = "user"; public static final String PASSWORD = "password"; public static final String XXXXXX = "******"; public static final String NULL = "NULL"; public static final String THREAD_NAME_MASTER_SERVER = "Master-Server"; public static final String THREAD_NAME_WORKER_SERVER = "Worker-Server"; /** * command parameter keys */ public static final String CMD_PARAM_RECOVER_PROCESS_ID_STRING = "ProcessInstanceId"; public static final String CMD_PARAM_RECOVERY_START_NODE_STRING = "StartNodeIdList"; public static final String CMD_PARAM_RECOVERY_WAITING_THREAD = "WaitingThreadInstanceId"; public static final String CMD_PARAM_SUB_PROCESS = "processInstanceId"; public static final String CMD_PARAM_EMPTY_SUB_PROCESS = "0"; public static final String CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID = "parentProcessInstanceId"; public static final String CMD_PARAM_SUB_PROCESS_DEFINE_ID = "processDefinitionId"; public static final String CMD_PARAM_START_NODE_NAMES = "StartNodeNameList"; public static final String CMD_PARAM_START_PARAMS = "StartParams"; public static final String CMD_PARAM_FATHER_PARAMS = "fatherParams"; /** * complement data start date */ public static final String CMDPARAM_COMPLEMENT_DATA_START_DATE = "complementStartDate"; /** * complement data end date */ public static final String CMDPARAM_COMPLEMENT_DATA_END_DATE = "complementEndDate"; /** * data source config */ public static final String SPRING_DATASOURCE_DRIVER_CLASS_NAME = "spring.datasource.driver-class-name"; public static final String SPRING_DATASOURCE_URL = "spring.datasource.url"; public static final String SPRING_DATASOURCE_USERNAME = "spring.datasource.username"; public static final String SPRING_DATASOURCE_PASSWORD = "spring.datasource.password"; public static final String SPRING_DATASOURCE_VALIDATION_QUERY_TIMEOUT = "spring.datasource.validationQueryTimeout"; public static final String SPRING_DATASOURCE_INITIAL_SIZE = "spring.datasource.initialSize"; public static final String SPRING_DATASOURCE_MIN_IDLE = "spring.datasource.minIdle"; public static final String SPRING_DATASOURCE_MAX_ACTIVE = "spring.datasource.maxActive"; public static final String SPRING_DATASOURCE_MAX_WAIT = "spring.datasource.maxWait"; public static final String SPRING_DATASOURCE_TIME_BETWEEN_EVICTION_RUNS_MILLIS = "spring.datasource.timeBetweenEvictionRunsMillis"; public static final String SPRING_DATASOURCE_TIME_BETWEEN_CONNECT_ERROR_MILLIS = "spring.datasource.timeBetweenConnectErrorMillis"; public static final String SPRING_DATASOURCE_MIN_EVICTABLE_IDLE_TIME_MILLIS = "spring.datasource.minEvictableIdleTimeMillis"; public static final String SPRING_DATASOURCE_VALIDATION_QUERY = "spring.datasource.validationQuery"; public static final String SPRING_DATASOURCE_TEST_WHILE_IDLE = "spring.datasource.testWhileIdle"; public static final String SPRING_DATASOURCE_TEST_ON_BORROW = "spring.datasource.testOnBorrow"; public static final String SPRING_DATASOURCE_TEST_ON_RETURN = "spring.datasource.testOnReturn"; public static final String SPRING_DATASOURCE_POOL_PREPARED_STATEMENTS = "spring.datasource.poolPreparedStatements"; public static final String SPRING_DATASOURCE_DEFAULT_AUTO_COMMIT = "spring.datasource.defaultAutoCommit"; public static final String SPRING_DATASOURCE_KEEP_ALIVE = "spring.datasource.keepAlive"; public static final String 
SPRING_DATASOURCE_MAX_POOL_PREPARED_STATEMENT_PER_CONNECTION_SIZE = "spring.datasource.maxPoolPreparedStatementPerConnectionSize"; public static final String DEVELOPMENT = "development"; public static final String QUARTZ_PROPERTIES_PATH = "quartz.properties"; /** * sleep time */ public static final int SLEEP_TIME_MILLIS = 1000; /** * master task instance cache-database refresh interval */ public static final int CACHE_REFRESH_TIME_MILLIS = 20 * 1000; /** * heartbeat for zk info length */ public static final int HEARTBEAT_FOR_ZOOKEEPER_INFO_LENGTH = 10; public static final int HEARTBEAT_WITH_WEIGHT_FOR_ZOOKEEPER_INFO_LENGTH = 11; /** * jar */ public static final String JAR = "jar"; /** * hadoop */ public static final String HADOOP = "hadoop"; /** * -D <property>=<value> */ public static final String D = "-D"; /** * -D mapreduce.job.name=name */ public static final String MR_NAME = "mapreduce.job.name"; /** * -D mapreduce.job.queuename=queuename */ public static final String MR_QUEUE = "mapreduce.job.queuename"; /** * spark params constant */ public static final String MASTER = "--master"; public static final String DEPLOY_MODE = "--deploy-mode"; /** * --class CLASS_NAME */ public static final String MAIN_CLASS = "--class"; /** * --driver-cores NUM */ public static final String DRIVER_CORES = "--driver-cores"; /** * --driver-memory MEM */ public static final String DRIVER_MEMORY = "--driver-memory"; /** * --num-executors NUM */ public static final String NUM_EXECUTORS = "--num-executors"; /** * --executor-cores NUM */ public static final String EXECUTOR_CORES = "--executor-cores"; /** * --executor-memory MEM */ public static final String EXECUTOR_MEMORY = "--executor-memory"; /** * --name NAME */ public static final String SPARK_NAME = "--name"; /** * --queue QUEUE */ public static final String SPARK_QUEUE = "--queue"; /** * exit code success */ public static final int EXIT_CODE_SUCCESS = 0; /** * exit code kill */ public static final int EXIT_CODE_KILL = 137; /** * exit code failure */ public static final int EXIT_CODE_FAILURE = -1; /** * process or task definition failure */ public static final int DEFINITION_FAILURE = -1; /** * process or task definition first version */ public static final int VERSION_FIRST = 1; /** * date format of yyyyMMdd */ public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd"; /** * date format of yyyyMMddHHmmss */ public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss"; /** * system date(yyyyMMddHHmmss) */ public static final String PARAMETER_DATETIME = "system.datetime"; /** * system date(yyyymmdd) today */ public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate"; /** * system date(yyyymmdd) yesterday */ public static final String PARAMETER_BUSINESS_DATE = "system.biz.date"; /** * the absolute path of current executing task */ public static final String PARAMETER_TASK_EXECUTE_PATH = "system.task.execute.path"; /** * the instance id of current task */ public static final String PARAMETER_TASK_INSTANCE_ID = "system.task.instance.id"; /** * ACCEPTED */ public static final String ACCEPTED = "ACCEPTED"; /** * SUCCEEDED */ public static final String SUCCEEDED = "SUCCEEDED"; /** * NEW */ public static final String NEW = "NEW"; /** * NEW_SAVING */ public static final String NEW_SAVING = "NEW_SAVING"; /** * SUBMITTED */ public static final String SUBMITTED = "SUBMITTED"; /** * FAILED */ public static final String FAILED = "FAILED"; /** * KILLED */ public static final String KILLED = "KILLED"; /** * RUNNING */ public static final String 
RUNNING = "RUNNING"; /** * underline "_" */ public static final String UNDERLINE = "_"; /** * quartz job prifix */ public static final String QUARTZ_JOB_PRIFIX = "job"; /** * quartz job group prifix */ public static final String QUARTZ_JOB_GROUP_PRIFIX = "jobgroup"; /** * projectId */ public static final String PROJECT_ID = "projectId"; /** * processId */ public static final String SCHEDULE_ID = "scheduleId"; /** * schedule */ public static final String SCHEDULE = "schedule"; /** * application regex */ public static final String APPLICATION_REGEX = "application_\\d+_\\d+"; public static final String PID = OSUtils.isWindows() ? "handle" : "pid"; /** * month_begin */ public static final String MONTH_BEGIN = "month_begin"; /** * add_months */ public static final String ADD_MONTHS = "add_months"; /** * month_end */ public static final String MONTH_END = "month_end"; /** * week_begin */ public static final String WEEK_BEGIN = "week_begin"; /** * week_end */ public static final String WEEK_END = "week_end"; /** * timestamp */ public static final String TIMESTAMP = "timestamp"; public static final char SUBTRACT_CHAR = '-'; public static final char ADD_CHAR = '+'; public static final char MULTIPLY_CHAR = '*'; public static final char DIVISION_CHAR = '/'; public static final char LEFT_BRACE_CHAR = '('; public static final char RIGHT_BRACE_CHAR = ')'; public static final String ADD_STRING = "+"; public static final String MULTIPLY_STRING = "*"; public static final String DIVISION_STRING = "/"; public static final String LEFT_BRACE_STRING = "("; public static final char P = 'P'; public static final char N = 'N'; public static final String SUBTRACT_STRING = "-"; public static final String GLOBAL_PARAMS = "globalParams"; public static final String LOCAL_PARAMS = "localParams"; public static final String LOCAL_PARAMS_LIST = "localParamsList"; public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId"; public static final String PROCESS_INSTANCE_STATE = "processInstanceState"; public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance"; public static final String CONDITION_RESULT = "conditionResult"; public static final String SWITCH_RESULT = "switchResult"; public static final String DEPENDENCE = "dependence"; public static final String TASK_TYPE = "taskType"; public static final String TASK_LIST = "taskList"; public static final String RWXR_XR_X = "rwxr-xr-x"; public static final String QUEUE = "queue"; public static final String QUEUE_NAME = "queueName"; public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0; public static final int LOG_QUERY_LIMIT = 4096; /** * master/worker server use for zk */ public static final String MASTER_TYPE = "master"; public static final String WORKER_TYPE = "worker"; public static final String DELETE_OP = "delete"; public static final String ADD_OP = "add"; public static final String ALIAS = "alias"; public static final String CONTENT = "content"; public static final String DEPENDENT_SPLIT = ":||"; public static final String DEPENDENT_ALL = "ALL"; /** * preview schedule execute count */ public static final int PREVIEW_SCHEDULE_EXECUTE_COUNT = 5; /** * kerberos */ public static final String KERBEROS = "kerberos"; /** * kerberos expire time */ public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time"; /** * java.security.krb5.conf */ public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf"; /** * java.security.krb5.conf.path */ public static final String JAVA_SECURITY_KRB5_CONF_PATH = 
"java.security.krb5.conf.path"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state"; /** * com.amazonaws.services.s3.enableV4 */ public static final String AWS_S3_V4 = "com.amazonaws.services.s3.enableV4"; /** * loginUserFromKeytab user */ public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username"; /** * loginUserFromKeytab path */ public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path"; /** * task log info format */ public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s"; /** * hive conf */ public static final String HIVE_CONF = "hiveconf:"; /** * flink */ public static final String FLINK_YARN_CLUSTER = "yarn-cluster"; public static final String FLINK_RUN_MODE = "-m"; public static final String FLINK_YARN_SLOT = "-ys"; public static final String FLINK_APP_NAME = "-ynm"; public static final String FLINK_QUEUE = "-yqu"; public static final String FLINK_TASK_MANAGE = "-yn"; public static final String FLINK_JOB_MANAGE_MEM = "-yjm"; public static final String FLINK_TASK_MANAGE_MEM = "-ytm"; public static final String FLINK_MAIN_CLASS = "-c"; public static final String FLINK_PARALLELISM = "-p"; public static final String FLINK_SHUTDOWN_ON_ATTACHED_EXIT = "-sae"; public static final int[] NOT_TERMINATED_STATES = new int[] { ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal(), ExecutionStatus.NEED_FAULT_TOLERANCE.ordinal(), ExecutionStatus.WAITING_THREAD.ordinal(), ExecutionStatus.WAITING_DEPEND.ordinal() }; /** * status */ public static final String STATUS = "status"; /** * message */ public static final String MSG = "msg"; /** * data total */ public static final String COUNT = "count"; /** * page size */ public static final String PAGE_SIZE = "pageSize"; /** * current page no */ public static final String PAGE_NUMBER = "pageNo"; /** * */ public static final String DATA_LIST = "data"; public static final String TOTAL_LIST = "totalList"; public static final String CURRENT_PAGE = "currentPage"; public static final String TOTAL_PAGE = "totalPage"; public static final String TOTAL = "total"; /** * workflow */ public static final String WORKFLOW_LIST = "workFlowList"; public static final String WORKFLOW_RELATION_LIST = "workFlowRelationList"; /** * session user */ public static final String SESSION_USER = "session.user"; public static final String SESSION_ID = "sessionId"; public static final String PASSWORD_DEFAULT = "******"; /** * locale */ public static final String LOCALE_LANGUAGE = "language"; /** * driver */ public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver"; public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver"; public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver"; public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver"; public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver"; public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver"; public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver"; public static final String 
COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver"; /** * database type */ public static final String MYSQL = "MYSQL"; public static final String POSTGRESQL = "POSTGRESQL"; public static final String HIVE = "HIVE"; public static final String SPARK = "SPARK"; public static final String CLICKHOUSE = "CLICKHOUSE"; public static final String ORACLE = "ORACLE"; public static final String SQLSERVER = "SQLSERVER"; public static final String DB2 = "DB2"; public static final String PRESTO = "PRESTO"; /** * jdbc url */ public static final String JDBC_MYSQL = "jdbc:mysql://"; public static final String JDBC_POSTGRESQL = "jdbc:postgresql://"; public static final String JDBC_HIVE_2 = "jdbc:hive2://"; public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://"; public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@"; public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//"; public static final String JDBC_SQLSERVER = "jdbc:sqlserver://"; public static final String JDBC_DB2 = "jdbc:db2://"; public static final String JDBC_PRESTO = "jdbc:presto://"; public static final String ADDRESS = "address"; public static final String DATABASE = "database"; public static final String JDBC_URL = "jdbcUrl"; public static final String PRINCIPAL = "principal"; public static final String OTHER = "other"; public static final String ORACLE_DB_CONNECT_TYPE = "connectType"; public static final String KERBEROS_KRB5_CONF_PATH = "javaSecurityKrb5Conf"; public static final String KERBEROS_KEY_TAB_USERNAME = "loginUserKeytabUsername"; public static final String KERBEROS_KEY_TAB_PATH = "loginUserKeytabPath"; /** * session timeout */ public static final int SESSION_TIME_OUT = 7200; public static final int MAX_FILE_SIZE = 1024 * 1024 * 1024; public static final String UDF = "UDF"; public static final String CLASS = "class"; public static final String RECEIVERS = "receivers"; public static final String RECEIVERS_CC = "receiversCc"; /** * dataSource sensitive param */ public static final String DATASOURCE_PASSWORD_REGEX = "(?<=((?i)password((\\\\\":\\\\\")|(=')))).*?(?=((\\\\\")|(')))"; /** * default worker group */ public static final String DEFAULT_WORKER_GROUP = "default"; public static final Integer TASK_INFO_LENGTH = 5; /** * new * schedule time */ public static final String PARAMETER_SHECDULE_TIME = "schedule.time"; /** * authorize writable perm */ public static final int AUTHORIZE_WRITABLE_PERM = 7; /** * authorize readable perm */ public static final int AUTHORIZE_READABLE_PERM = 4; /** * plugin configurations */ public static final String PLUGIN_JAR_SUFFIX = ".jar"; public static final int NORMAL_NODE_STATUS = 0; public static final int ABNORMAL_NODE_STATUS = 1; public static final String START_TIME = "start time"; public static final String END_TIME = "end time"; public static final String START_END_DATE = "startDate,endDate"; /** * system line separator */ public static final String SYSTEM_LINE_SEPARATOR = System.getProperty("line.separator"); public static final String EXCEL_SUFFIX_XLS = ".xls"; /** * datasource encryption salt */ public static final String DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*"; public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable"; public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt"; /** * network interface preferred */ public static final String DOLPHIN_SCHEDULER_NETWORK_INTERFACE_PREFERRED = "dolphin.scheduler.network.interface.preferred"; /** * network IP gets priority, default 
inner outer */ public static final String DOLPHIN_SCHEDULER_NETWORK_PRIORITY_STRATEGY = "dolphin.scheduler.network.priority.strategy"; /** * exec shell scripts */ public static final String SH = "sh"; /** * pstree, get pud and sub pid */ public static final String PSTREE = "pstree"; /** * snow flake, data center id, this id must be greater than 0 and less than 32 */ public static final String SNOW_FLAKE_DATA_CENTER_ID = "data.center.id"; /** * docker & kubernetes */ public static final boolean DOCKER_MODE = !StringUtils.isEmpty(System.getenv("DOCKER")); public static final boolean KUBERNETES_MODE = !StringUtils.isEmpty(System.getenv("KUBERNETES_SERVICE_HOST")) && !StringUtils.isEmpty(System.getenv("KUBERNETES_SERVICE_PORT")); /** * task parameter keys */ public static final String TASK_PARAMS = "params"; public static final String TASK_PARAMS_DATASOURCE = "datasource"; public static final String TASK_PARAMS_DATASOURCE_NAME = "datasourceName"; public static final String TASK_DEPENDENCE = "dependence"; public static final String TASK_DEPENDENCE_DEPEND_TASK_LIST = "dependTaskList"; public static final String TASK_DEPENDENCE_DEPEND_ITEM_LIST = "dependItemList"; public static final String TASK_DEPENDENCE_PROJECT_ID = "projectId"; public static final String TASK_DEPENDENCE_PROJECT_NAME = "projectName"; public static final String TASK_DEPENDENCE_DEFINITION_ID = "definitionId"; public static final String TASK_DEPENDENCE_DEFINITION_NAME = "definitionName"; }
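The Constants class above centralizes, among other things, the command-parameter keys for complement (backfill) runs, CMDPARAM_COMPLEMENT_DATA_START_DATE and CMDPARAM_COMPLEMENT_DATA_END_DATE. A minimal sketch of how such a parameter map could be assembled from these keys; the wrapper class and the main method are illustrative assumptions, not the scheduler's actual execution path:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.dolphinscheduler.common.Constants;

// Hypothetical helper, not part of the project: builds the command-parameter
// map for a complement (backfill) run from the keys defined in Constants.
public class ComplementParamSketch {
    public static Map<String, String> build(String startDate, String endDate) {
        Map<String, String> cmdParam = new HashMap<>();
        // dates follow the YYYY_MM_DD_HH_MM_SS format declared in Constants
        cmdParam.put(Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE, startDate);
        cmdParam.put(Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE, endDate);
        return cmdParam;
    }

    public static void main(String[] args) {
        System.out.println(build("2021-09-01 00:00:00", "2021-09-07 00:00:00"));
    }
}
```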
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,208
[Bug] [Master] json parse error in sub process
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Branch: dev. When running a sub process, a JSON parse error occurs in the sub process. ![image](https://user-images.githubusercontent.com/29528966/133248693-467afe92-1310-441d-b929-2bc4b25810b7.png) ### What you expected to happen The sub process runs successfully. ### How to reproduce Run a sub process. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6208
https://github.com/apache/dolphinscheduler/pull/6211
ac943bca6a3146ea23bdbfe85a09eac68da73945
8694b9c8f989eb5c6b0792e5901cba2a207cdcf0
"2021-09-14T11:24:11Z"
java
"2021-09-15T02:29:26Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/TaskNode.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common.model; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.commons.lang.StringUtils; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.annotation.JsonSerialize; public class TaskNode { /** * task node id */ private String id; /** * task node code */ private long code; /** * task node version */ private int version; /** * task node name */ private String name; /** * task node description */ private String desc; /** * task node type */ private String type; /** * the run flag has two states, NORMAL or FORBIDDEN */ private String runFlag; /** * the front field */ private String loc; /** * maximum number of retries */ private int maxRetryTimes; /** * Unit of retry interval: points */ private int retryInterval; /** * params information */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String params; /** * inner dependency information */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String preTasks; /** * node dependency list */ private List<PreviousTaskNode> preTaskNodeList; /** * users store additional information */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String extras; /** * node dependency list */ private List<String> depList; /** * outer dependency information */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String dependence; @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String conditionResult; @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String switchResult; /** * task instance priority */ private Priority taskInstancePriority; /** * worker group */ private String workerGroup; /** * environment code */ private Long environmentCode; /** * task time out */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = 
JSONUtils.JsonDataSerializer.class) private String timeout; /** * delay execution time. */ private int delayTime; public String getId() { return id; } public void setId(String id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getDesc() { return desc; } public void setDesc(String desc) { this.desc = desc; } public String getType() { return type; } public void setType(String type) { this.type = type; } public String getParams() { return params; } public void setParams(String params) { this.params = params; } public String getPreTasks() { return preTasks; } public void setPreTasks(String preTasks) { this.preTasks = preTasks; this.depList = JSONUtils.toList(preTasks, String.class); } public String getExtras() { return extras; } public void setExtras(String extras) { this.extras = extras; } public List<String> getDepList() { return depList; } public void setDepList(List<String> depList) { if (depList != null) { this.depList = depList; this.preTasks = JSONUtils.toJsonString(depList); } } public String getLoc() { return loc; } public void setLoc(String loc) { this.loc = loc; } public String getRunFlag() { return runFlag; } public void setRunFlag(String runFlag) { this.runFlag = runFlag; } public Boolean isForbidden() { return (!StringUtils.isEmpty(this.runFlag) && this.runFlag.equals(Constants.FLOWNODE_RUN_FLAG_FORBIDDEN)); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } TaskNode taskNode = (TaskNode) o; return Objects.equals(name, taskNode.name) && Objects.equals(desc, taskNode.desc) && Objects.equals(type, taskNode.type) && Objects.equals(params, taskNode.params) && Objects.equals(preTasks, taskNode.preTasks) && Objects.equals(extras, taskNode.extras) && Objects.equals(runFlag, taskNode.runFlag) && Objects.equals(dependence, taskNode.dependence) && Objects.equals(workerGroup, taskNode.workerGroup) && Objects.equals(environmentCode, taskNode.environmentCode) && Objects.equals(conditionResult, taskNode.conditionResult) && CollectionUtils.equalLists(depList, taskNode.depList); } @Override public int hashCode() { return Objects.hash(name, desc, type, params, preTasks, extras, depList, runFlag); } public String getDependence() { return dependence; } public void setDependence(String dependence) { this.dependence = dependence; } public int getMaxRetryTimes() { return maxRetryTimes; } public void setMaxRetryTimes(int maxRetryTimes) { this.maxRetryTimes = maxRetryTimes; } public int getRetryInterval() { return retryInterval; } public void setRetryInterval(int retryInterval) { this.retryInterval = retryInterval; } public Priority getTaskInstancePriority() { return taskInstancePriority; } public void setTaskInstancePriority(Priority taskInstancePriority) { this.taskInstancePriority = taskInstancePriority; } public String getTimeout() { return timeout; } public void setTimeout(String timeout) { this.timeout = timeout; } public String getWorkerGroup() { return workerGroup; } public void setWorkerGroup(String workerGroup) { this.workerGroup = workerGroup; } public String getConditionResult() { return conditionResult; } public void setConditionResult(String conditionResult) { this.conditionResult = conditionResult; } public int getDelayTime() { return delayTime; } public void setDelayTime(int delayTime) { this.delayTime = delayTime; } public long getCode() { return code; } public void setCode(long code) { this.code = code; } public int 
getVersion() { return version; } public void setVersion(int version) { this.version = version; } /** * get task time out parameter * * @return task time out parameter */ public TaskTimeoutParameter getTaskTimeoutParameter() { if (!StringUtils.isEmpty(this.getTimeout())) { String formatStr = String.format("%s,%s", TaskTimeoutStrategy.WARN.name(), TaskTimeoutStrategy.FAILED.name()); String taskTimeout = this.getTimeout().replace(formatStr, TaskTimeoutStrategy.WARNFAILED.name()); return JSONUtils.parseObject(taskTimeout, TaskTimeoutParameter.class); } return new TaskTimeoutParameter(false); } public boolean isConditionsTask() { return TaskType.CONDITIONS.getDesc().equalsIgnoreCase(this.getType()); } public boolean isSwitchTask() { return TaskType.SWITCH.toString().equalsIgnoreCase(this.getType()); } public List<PreviousTaskNode> getPreTaskNodeList() { return preTaskNodeList; } public void setPreTaskNodeList(List<PreviousTaskNode> preTaskNodeList) { this.preTaskNodeList = preTaskNodeList; } public String getTaskParams() { Map<String, Object> taskParams = JSONUtils.toMap(this.params, String.class, Object.class); if (taskParams == null) { taskParams = new HashMap<>(); } taskParams.put(Constants.CONDITION_RESULT, this.conditionResult); taskParams.put(Constants.DEPENDENCE, this.dependence); taskParams.put(Constants.SWITCH_RESULT, this.switchResult); return JSONUtils.toJsonString(taskParams); } public Map<String, Object> taskParamsToJsonObj(String taskParams) { Map<String, Object> taskParamsMap = JSONUtils.toMap(taskParams, String.class, Object.class); if (taskParamsMap == null) { taskParamsMap = new HashMap<>(); } return taskParamsMap; } @Override public String toString() { return "TaskNode{" + "id='" + id + '\'' + ", code=" + code + ", version=" + version + ", name='" + name + '\'' + ", desc='" + desc + '\'' + ", type='" + type + '\'' + ", runFlag='" + runFlag + '\'' + ", loc='" + loc + '\'' + ", maxRetryTimes=" + maxRetryTimes + ", retryInterval=" + retryInterval + ", params='" + params + '\'' + ", preTasks='" + preTasks + '\'' + ", preTaskNodeList=" + preTaskNodeList + ", extras='" + extras + '\'' + ", depList=" + depList + ", dependence='" + dependence + '\'' + ", conditionResult='" + conditionResult + '\'' + ", taskInstancePriority=" + taskInstancePriority + ", workerGroup='" + workerGroup + '\'' + ", environmentCode=" + environmentCode + ", timeout='" + timeout + '\'' + ", delayTime=" + delayTime + '}'; } public void setEnvironmentCode(Long environmentCode) { this.environmentCode = environmentCode; } public Long getEnvironmentCode() { return this.environmentCode; } public String getSwitchResult() { return switchResult; } public void setSwitchResult(String switchResult) { this.switchResult = switchResult; } }
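TaskNode above keeps params, dependence, conditionResult and similar fields as raw JSON strings via the custom JsonDataDeserializer/JsonDataSerializer, which is exactly where issue 6208's parse error surfaced. A minimal round-trip sketch, using only the JSONUtils methods that TaskNode itself calls; the sample field values are illustrative:

```java
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.utils.JSONUtils;

// Illustrative round trip: serialize a TaskNode and parse it back. If the
// custom (de)serializers mishandle the nested JSON fields, the parse step
// is where a sub-process definition would fail.
public class TaskNodeRoundTrip {
    public static void main(String[] args) {
        TaskNode node = new TaskNode();
        node.setName("sub-node");
        node.setType("SUB_PROCESS");
        // params is stored as a raw JSON string, not as a nested object
        node.setParams("{\"processDefinitionId\":1}");

        String json = JSONUtils.toJsonString(node);
        TaskNode parsed = JSONUtils.parseObject(json, TaskNode.class);
        System.out.println(parsed.getParams());
    }
}
```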
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,158
[Question][dev] Local dev environment error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Running CreateDolphinScheduler to create the tables raises an error: t_ds_task_definition and t_ds_task_definition_log are never created in the sql\upgrade\1.4.0_schema\mysql\dolphinscheduler_ddl.sql file, yet the script adds new fields to them. ### DolphinScheduler Version -dev ### Additional context ![企业微信截图_16312404385756](https://user-images.githubusercontent.com/56899730/132788810-cbcf58c7-d3ec-426b-9809-ec5f5ea1ee4d.png) ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6158
https://github.com/apache/dolphinscheduler/pull/6210
8694b9c8f989eb5c6b0792e5901cba2a207cdcf0
6820684307f11dd28731dcae8ec9868615a6ed7f
"2021-09-10T02:24:48Z"
java
"2021-09-15T04:03:15Z"
sql/upgrade/1.4.0_schema/mysql/dolphinscheduler_ddl.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET sql_mode=(SELECT REPLACE(@@sql_mode,'ONLY_FULL_GROUP_BY','')); -- uc_dolphin_T_t_ds_user_A_state drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_user_A_state; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_user_A_state() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_user' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='state') THEN ALTER TABLE t_ds_user ADD `state` int(1) DEFAULT 1 COMMENT 'state 0:disable 1:enable'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_user_A_state; DROP PROCEDURE uc_dolphin_T_t_ds_user_A_state; -- uc_dolphin_T_t_ds_tenant_A_tenant_name drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_tenant_A_tenant_name; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name() BEGIN IF EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_tenant' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='tenant_name') THEN ALTER TABLE t_ds_tenant DROP `tenant_name`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_tenant_A_tenant_name; DROP PROCEDURE uc_dolphin_T_t_ds_tenant_A_tenant_name; -- uc_dolphin_T_t_ds_task_instance_A_first_submit_time drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_first_submit_time; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='first_submit_time') THEN ALTER TABLE t_ds_task_instance ADD `first_submit_time` datetime DEFAULT NULL COMMENT 'task first submit time'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_first_submit_time(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_first_submit_time; -- uc_dolphin_T_t_ds_task_instance_A_delay_time drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_delay_time; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='delay_time') THEN ALTER TABLE t_ds_task_instance ADD `delay_time` int(4) DEFAULT '0' COMMENT 'task delay execution time'; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_delay_time(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_delay_time; -- uc_dolphin_T_t_ds_task_instance_A_var_pool drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_task_instance_A_var_pool; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_task_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_task_instance 
ADD `var_pool` longtext NULL; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_task_instance_A_var_pool(); DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_A_var_pool; -- uc_dolphin_T_t_ds_process_instance_A_var_pool drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_instance_A_var_pool; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_instance' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='var_pool') THEN ALTER TABLE t_ds_process_instance ADD `var_pool` longtext NULL; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_instance_A_var_pool(); DROP PROCEDURE uc_dolphin_T_t_ds_process_instance_A_var_pool; -- uc_dolphin_T_t_ds_process_definition_A_modify_by drop PROCEDURE if EXISTS ct_dolphin_T_t_ds_process_definition_version; delimiter d// CREATE PROCEDURE ct_dolphin_T_t_ds_process_definition_version() BEGIN CREATE TABLE IF NOT EXISTS `t_ds_process_definition_version` ( `id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'key', `process_definition_id` int(11) NOT NULL COMMENT 'process definition id', `version` int(11) DEFAULT NULL COMMENT 'process definition version', `process_definition_json` longtext COMMENT 'process definition json content', `description` text, `global_params` text COMMENT 'global parameters', `locations` text COMMENT 'Node location information', `connects` text COMMENT 'Node connection information', `receivers` text COMMENT 'receivers', `receivers_cc` text COMMENT 'cc', `create_time` datetime DEFAULT NULL COMMENT 'create time', `timeout` int(11) DEFAULT '0' COMMENT 'time out', `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids', PRIMARY KEY (`id`), UNIQUE KEY `process_definition_id_and_version` (`process_definition_id`,`version`) USING BTREE, KEY `process_definition_index` (`id`) USING BTREE ) ENGINE=InnoDB AUTO_INCREMENT=84 DEFAULT CHARSET=utf8; END; d// delimiter ; CALL ct_dolphin_T_t_ds_process_definition_version; DROP PROCEDURE ct_dolphin_T_t_ds_process_definition_version; -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- DROP TABLE IF EXISTS `t_ds_plugin_define`; CREATE TABLE `t_ds_plugin_define` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_name` varchar(100) NOT NULL COMMENT 'the name of plugin eg: email', `plugin_type` varchar(100) NOT NULL COMMENT 'plugin type . alert=alert plugin, job=job plugin', `plugin_params` text COMMENT 'plugin params', `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `t_ds_plugin_define_UN` (`plugin_name`,`plugin_type`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=utf8; -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS `t_ds_alert_plugin_instance`; CREATE TABLE `t_ds_alert_plugin_instance` ( `id` int NOT NULL AUTO_INCREMENT, `plugin_define_id` int NOT NULL, `plugin_instance_params` text COMMENT 'plugin instance params. 
Also contain the params value which user input in web ui.', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, `instance_name` varchar(200) DEFAULT NULL COMMENT 'alert instance name', PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; -- uc_dolphin_T_t_ds_process_definition_A_warning_group_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_A_warning_group_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_definition_A_warning_group_id(); DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_A_warning_group_id; -- uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_process_definition_version' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='warning_group_id') THEN ALTER TABLE t_ds_process_definition_version ADD COLUMN `warning_group_id` int(11) DEFAULT NULL COMMENT 'alert group id' AFTER `connects`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id(); DROP PROCEDURE uc_dolphin_T_t_ds_process_definition_version_A_warning_group_id; -- uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='alert_instance_ids') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN `alert_instance_ids` varchar (255) DEFAULT NULL COMMENT 'alert instance ids' AFTER `id`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_alert_instance_ids; -- uc_dolphin_T_t_ds_alertgroup_A_create_user_id drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_create_user_id; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='create_user_id') THEN ALTER TABLE t_ds_alertgroup ADD COLUMN `create_user_id` int(11) DEFAULT NULL COMMENT 'create user id' AFTER `alert_instance_ids`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_create_user_id(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_create_user_id; -- uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS WHERE TABLE_NAME='t_ds_alertgroup' AND TABLE_SCHEMA=(SELECT DATABASE()) AND INDEX_NAME ='t_ds_alertgroup_name_un') THEN ALTER TABLE t_ds_alertgroup ADD UNIQUE 
KEY `t_ds_alertgroup_name_un` (`group_name`); END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName(); DROP PROCEDURE uc_dolphin_T_t_ds_alertgroup_A_add_UN_groupName; -- uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.STATISTICS WHERE TABLE_NAME='t_ds_datasource' AND TABLE_SCHEMA=(SELECT DATABASE()) AND INDEX_NAME ='t_ds_datasource_name_un') THEN ALTER TABLE t_ds_datasource ADD UNIQUE KEY `t_ds_datasource_name_un` (`name`, `type`); END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName(); DROP PROCEDURE uc_dolphin_T_t_ds_datasource_A_add_UN_datasourceName; -- uc_dolphin_T_t_ds_schedules_A_add_timezone drop PROCEDURE if EXISTS uc_dolphin_T_t_ds_schedules_A_add_timezone; delimiter d// CREATE PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone() BEGIN IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS WHERE TABLE_NAME='t_ds_schedules' AND TABLE_SCHEMA=(SELECT DATABASE()) AND COLUMN_NAME ='timezone_id') THEN ALTER TABLE t_ds_schedules ADD COLUMN `timezone_id` varchar(40) default NULL COMMENT 'schedule timezone id' AFTER `end_time`; END IF; END; d// delimiter ; CALL uc_dolphin_T_t_ds_schedules_A_add_timezone(); DROP PROCEDURE uc_dolphin_T_t_ds_schedules_A_add_timezone; -- ---------------------------- -- Table structure for t_ds_environment -- ---------------------------- DROP TABLE IF EXISTS `t_ds_environment`; CREATE TABLE `t_ds_environment` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `code` bigint(20) DEFAULT NULL COMMENT 'encoding', `name` varchar(100) NOT NULL COMMENT 'environment config name', `config` text NULL DEFAULT NULL COMMENT 'this config contains many environment variables config', `description` text NULL DEFAULT NULL COMMENT 'the details', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_name_unique` (`name`), UNIQUE KEY `environment_code_unique` (`code`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; ALTER TABLE t_ds_task_definition ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_task_definition_log ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_command ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_error_command ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_schedules ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_process_instance ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_task_instance ADD COLUMN `environment_code` bigint(20) default '-1' COMMENT 'environment code' AFTER `worker_group`; ALTER TABLE t_ds_task_instance ADD COLUMN `environment_config` text COMMENT 'environment config' AFTER `environment_code`; -- ---------------------------- -- Table structure for t_ds_environment_worker_group_relation -- ---------------------------- DROP TABLE IF 
EXISTS `t_ds_environment_worker_group_relation`; CREATE TABLE `t_ds_environment_worker_group_relation` ( `id` bigint(11) NOT NULL AUTO_INCREMENT COMMENT 'id', `environment_code` bigint(20) NOT NULL COMMENT 'environment code', `worker_group` varchar(255) NOT NULL COMMENT 'worker group id', `operator` int(11) DEFAULT NULL COMMENT 'operator user id', `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP, `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (`id`), UNIQUE KEY `environment_worker_group_unique` (`environment_code`,`worker_group`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; -- ---------------------------- -- These columns will not be used in the new version,if you determine that the historical data is useless, you can delete it using the sql below -- ---------------------------- -- ALTER TABLE t_ds_alert DROP `show_type`, DROP `alert_type`, DROP `receivers`, DROP `receivers_cc`; -- ALTER TABLE t_ds_alertgroup DROP `group_type`; -- ALTER TABLE t_ds_process_definition DROP `receivers`, DROP `receivers_cc`; -- ALTER TABLE t_ds_process_definition_version DROP `receivers`, DROP `receivers_cc`; -- DROP TABLE IF EXISTS t_ds_relation_user_alertgroup; -- ALTER TABLE t_ds_command DROP `dependence`; -- ALTER TABLE t_ds_error_command DROP `dependence`;
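Issue 6158's point is that the upgrade script above only ALTERs t_ds_task_definition and t_ds_task_definition_log without creating them first, and issue 6152 shows what happens at runtime when environment_code is absent or lacks a default. A hedged JDBC sketch of the same check-then-alter pattern the script applies to other columns; the JDBC URL and credentials are placeholders, not the project's real configuration:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;

// Hedged sketch: query information_schema first, then ALTER only if the
// column is missing, mirroring the script's idempotent stored procedures.
public class AddEnvironmentCodeColumn {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/dolphinscheduler", "root", "root")) {
            String check = "SELECT 1 FROM information_schema.COLUMNS"
                    + " WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 't_ds_task_definition'"
                    + " AND COLUMN_NAME = 'environment_code'";
            try (PreparedStatement ps = conn.prepareStatement(check);
                 ResultSet rs = ps.executeQuery()) {
                if (!rs.next()) {
                    // column missing: add it with a default so inserts that omit it
                    // no longer fail with "doesn't have a default value"
                    try (Statement st = conn.createStatement()) {
                        st.executeUpdate("ALTER TABLE t_ds_task_definition"
                                + " ADD COLUMN `environment_code` bigint(20) DEFAULT '-1'"
                                + " COMMENT 'environment code' AFTER `worker_group`");
                    }
                }
            }
        }
    }
}
```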
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,152
[Bug][Server] Manual execution of workflow exception
**For better global communication, please describe it in English. If you feel the description in English is not clear, then you can append a description in Chinese (just for Mandarin (CN)), thx!** **Describe the bug** Exception 1: ``` insert into t_ds_task_definition (code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, create_time, update_time) values (?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?) ### Cause: java.sql.SQLException: Field 'environment_code' doesn't have a default value ``` Exception 2: ``` insert into t_ds_task_instance ### Cause: java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list' ; bad SQL grammar []; nested exception is java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list' ``` Exception 3: in the Master-Server, the TASK_EXECUTE_RESPONSE and TASK_EXECUTE_ACK processor classes are wrong.
https://github.com/apache/dolphinscheduler/issues/6152
https://github.com/apache/dolphinscheduler/pull/6209
c1496d8a74ef69253d1873602a467ca61876bbfb
5fead427f227bd6261b677fff600e4f913462c5e
"2021-09-09T10:38:28Z"
java
"2021-09-16T07:07:22Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessTaskRelationLogMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper"> <sql id="baseSql"> id, `name`, process_definition_version, project_code, process_definition_code, pre_task_code, pre_task_version, post_task_code, post_task_version, condition_type, condition_params, operator, operate_time, create_time, update_time </sql> <select id="queryByProcessCodeAndVersion" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog"> select <include refid="baseSql"/> from t_ds_process_task_relation_log WHERE process_definition_code = #{processCode} and process_definition_version = #{processVersion} </select> <insert id="batchInsert"> insert into t_ds_process_task_relation_log (`name`, process_definition_version, project_code, process_definition_code, pre_task_code, pre_task_version, post_task_code, post_task_version, condition_type, condition_params, operator, operate_time, create_time, update_time) values <foreach collection="taskRelationList" item="relation" separator=","> (#{relation.name},#{relation.processDefinitionVersion},#{relation.projectCode},#{relation.processDefinitionCode}, #{relation.preTaskCode},#{relation.preTaskVersion},#{relation.postTaskCode},#{relation.postTaskVersion}, #{relation.conditionType},#{relation.conditionParams},#{relation.operator},#{relation.operateTime}, #{relation.createTime},#{relation.updateTime}) </foreach> </insert> </mapper>
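The XML above binds by namespace to a Java mapper interface. A sketch of what that interface plausibly looks like, with method names taken from the statement ids and @Param names inferred from the #{...} placeholders rather than copied from the real source:

```java
import java.util.List;

import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.ibatis.annotations.Param;

// Inferred shape of the mapper interface the XML namespace points at;
// treat signatures as assumptions, not the project's exact declarations.
public interface ProcessTaskRelationLogMapper {
    List<ProcessTaskRelationLog> queryByProcessCodeAndVersion(@Param("processCode") long processCode,
                                                              @Param("processVersion") int processVersion);

    int batchInsert(@Param("taskRelationList") List<ProcessTaskRelationLog> taskRelationList);
}
```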
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,152
[Bug][Server] Manual execution of workflow exception
**Describe the bug**

Exception 1:

```
insert into t_ds_task_definition (code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, create_time, update_time)
values (?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?)
### Cause: java.sql.SQLException: Field 'environment_code' doesn't have a default value
```

Exception 2:

```
insert into t_ds_task_instance
### Cause: java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list'
; bad SQL grammar []; nested exception is java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list'
```

Exception 3: on the Master server, the TASK_EXECUTE_RESPONSE and TASK_EXECUTE_ACK commands fail with a processor class error.
https://github.com/apache/dolphinscheduler/issues/6152
https://github.com/apache/dolphinscheduler/pull/6209
c1496d8a74ef69253d1873602a467ca61876bbfb
5fead427f227bd6261b677fff600e4f913462c5e
"2021-09-09T10:38:28Z"
java
"2021-09-16T07:07:22Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessTaskRelationMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper"> <sql id="baseSql"> id, `name`, process_definition_version, project_code, process_definition_code, pre_task_code, pre_task_version, post_task_code, post_task_version, condition_type, condition_params, create_time, update_time </sql> <select id="queryByProcessCode" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation"> select <include refid="baseSql"/> from t_ds_process_task_relation WHERE project_code = #{projectCode} and process_definition_code = #{processCode} </select> <select id="queryByTaskCode" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation"> select <include refid="baseSql"/> from t_ds_process_task_relation WHERE pre_task_code = #{taskCode} <if test="taskCode != 0"> or post_task_code = #{taskCode} </if> </select> <select id="queryByTaskCodes" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation"> select <include refid="baseSql"/> from t_ds_process_task_relation WHERE 1 = 1 <if test="taskCodes != null and taskCodes.length != 0"> and pre_task_code in <foreach collection="taskCodes" index="index" item="i" open="(" separator="," close=")"> #{i} </foreach> or post_task_code in <foreach collection="taskCodes" index="index" item="i" open="(" separator="," close=")"> #{i} </foreach> </if> </select> <delete id="deleteByCode"> delete from t_ds_process_task_relation WHERE project_code = #{projectCode} and process_definition_code = #{processCode} </delete> <insert id="batchInsert"> insert into t_ds_process_task_relation (`name`, process_definition_version, project_code, process_definition_code, pre_task_code, pre_task_version, post_task_code, post_task_version, condition_type, condition_params, create_time, update_time) values <foreach collection="taskRelationList" item="relation" separator=","> (#{relation.name},#{relation.processDefinitionVersion},#{relation.projectCode},#{relation.processDefinitionCode}, #{relation.preTaskCode},#{relation.preTaskVersion},#{relation.postTaskCode},#{relation.postTaskVersion}, #{relation.conditionType},#{relation.conditionParams},#{relation.createTime},#{relation.updateTime}) </foreach> </insert> </mapper>
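One detail in `queryByTaskCodes` is worth flagging: the generated predicate mixes `and` and `or` without parentheses (`1 = 1 and pre_task_code in (...) or post_task_code in (...)`). Since `AND` binds tighter than `OR` in SQL, this still parses as the intended "pre or post matches", but only by precedence. A sketch of the effective query for a hypothetical `taskCodes = (1, 2)`:

```
-- What the dynamic SQL effectively evaluates for taskCodes = (1, 2):
--   (1 = 1 AND pre_task_code IN (1, 2)) OR post_task_code IN (1, 2)
-- i.e. the parenthesized form below; explicit parentheses would keep the
-- intent robust against future edits to the <if> blocks.
SELECT id, `name`, project_code, process_definition_code
FROM t_ds_process_task_relation
WHERE (pre_task_code IN (1, 2))
   OR (post_task_code IN (1, 2));
```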
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,152
[Bug][Server] Manual execution of workflow exception
**Describe the bug**

Exception 1:

```
insert into t_ds_task_definition (code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, create_time, update_time)
values (?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?)
### Cause: java.sql.SQLException: Field 'environment_code' doesn't have a default value
```

Exception 2:

```
insert into t_ds_task_instance
### Cause: java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list'
; bad SQL grammar []; nested exception is java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list'
```

Exception 3: on the Master server, the TASK_EXECUTE_RESPONSE and TASK_EXECUTE_ACK commands fail with a processor class error.
https://github.com/apache/dolphinscheduler/issues/6152
https://github.com/apache/dolphinscheduler/pull/6209
c1496d8a74ef69253d1873602a467ca61876bbfb
5fead427f227bd6261b677fff600e4f913462c5e
"2021-09-09T10:38:28Z"
java
"2021-09-16T07:07:22Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/TaskDefinitionLogMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper"> <sql id="baseSql"> id, code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, environment_code, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, operator, operate_time, create_time, update_time </sql> <select id="queryMaxVersionForDefinition" resultType="java.lang.Integer"> select max(version) from t_ds_task_definition_log WHERE code = #{code} </select> <select id="queryByDefinitionCodeAndVersion" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog"> select <include refid="baseSql"/> from t_ds_task_definition_log WHERE code = #{code} and version = #{version} </select> <select id="queryByTaskDefinitions" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog"> select <include refid="baseSql"/> from t_ds_task_definition_log WHERE 1 = 1 <if test="taskDefinitions != null and taskDefinitions.size != 0"> and <foreach collection="taskDefinitions" index="index" item="item" open="(" separator=" or " close=")"> (code = #{item.code} and version = #{item.version}) </foreach> </if> </select> <insert id="batchInsert"> insert into t_ds_task_definition_log (code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, operator, operate_time, create_time, update_time) values <foreach collection="taskDefinitionLogs" item="taskDefinitionLog" separator=","> (#{taskDefinitionLog.code},#{taskDefinitionLog.name},#{taskDefinitionLog.version},#{taskDefinitionLog.description}, #{taskDefinitionLog.projectCode},#{taskDefinitionLog.userId},#{taskDefinitionLog.taskType},#{taskDefinitionLog.taskParams}, #{taskDefinitionLog.flag},#{taskDefinitionLog.taskPriority},#{taskDefinitionLog.workerGroup},#{taskDefinitionLog.failRetryTimes}, #{taskDefinitionLog.failRetryInterval},#{taskDefinitionLog.timeoutFlag},#{taskDefinitionLog.timeoutNotifyStrategy},#{taskDefinitionLog.timeout}, #{taskDefinitionLog.delayTime},#{taskDefinitionLog.resourceIds},#{taskDefinitionLog.operator},#{taskDefinitionLog.operateTime}, #{taskDefinitionLog.createTime},#{taskDefinitionLog.updateTime}) </foreach> </insert> <delete id="deleteByCodeAndVersion"> delete from t_ds_task_definition_log where code = #{code} and version = #{version} </delete> <select id="queryTaskDefinitionVersionsPaging" 
resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog"> select <include refid="baseSql"/> from t_ds_task_definition_log where code = #{code} order by version desc </select> </mapper>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,152
[Bug][Server] Manual execution of workflow exception
**Describe the bug**

Exception 1:

```
insert into t_ds_task_definition (code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, create_time, update_time)
values (?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?)
### Cause: java.sql.SQLException: Field 'environment_code' doesn't have a default value
```

Exception 2:

```
insert into t_ds_task_instance
### Cause: java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list'
; bad SQL grammar []; nested exception is java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list'
```

Exception 3: on the Master server, the TASK_EXECUTE_RESPONSE and TASK_EXECUTE_ACK commands fail with a processor class error.
https://github.com/apache/dolphinscheduler/issues/6152
https://github.com/apache/dolphinscheduler/pull/6209
c1496d8a74ef69253d1873602a467ca61876bbfb
5fead427f227bd6261b677fff600e4f913462c5e
"2021-09-09T10:38:28Z"
java
"2021-09-16T07:07:22Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/TaskDefinitionMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper"> <sql id="baseSql"> id, code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, environment_code, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, create_time, update_time </sql> <select id="queryByName" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinition"> select <include refid="baseSql"/> from t_ds_task_definition WHERE project_code = #{projectCode} and `name` = #{name} </select> <select id="queryAllDefinitionList" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinition"> select <include refid="baseSql"/> from t_ds_task_definition where project_code = #{projectCode} order by create_time desc </select> <select id="countDefinitionGroupByUser" resultType="org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser"> SELECT td.user_id as user_id, tu.user_name as user_name, count(0) as count FROM t_ds_task_definition td JOIN t_ds_user tu on tu.id=td.user_id where 1 = 1 <if test="projectCodes != null and projectCodes.length != 0"> and td.project_code in <foreach collection="projectCodes" index="index" item="i" open="(" separator="," close=")"> #{i} </foreach> </if> group by td.user_id,tu.user_name </select> <select id="queryByCode" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinition"> select <include refid="baseSql"/> from t_ds_task_definition where code = #{code} </select> <select id="listResources" resultType="java.util.HashMap"> SELECT id,resource_ids FROM t_ds_task_definition WHERE resource_ids is not null and resource_ids != '' </select> <select id="listResourcesByUser" resultType="java.util.HashMap"> SELECT id,resource_ids FROM t_ds_task_definition WHERE user_id = #{userId} and resource_ids is not null and resource_ids != '' </select> <delete id="deleteByCode"> delete from t_ds_task_definition where code = #{code} </delete> <insert id="batchInsert"> insert into t_ds_task_definition (code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, create_time, update_time) values <foreach collection="taskDefinitions" item="taskDefinition" separator=","> (#{taskDefinition.code},#{taskDefinition.name},#{taskDefinition.version},#{taskDefinition.description}, #{taskDefinition.projectCode},#{taskDefinition.userId},#{taskDefinition.taskType},#{taskDefinition.taskParams}, 
#{taskDefinition.flag},#{taskDefinition.taskPriority},#{taskDefinition.workerGroup},#{taskDefinition.failRetryTimes}, #{taskDefinition.failRetryInterval},#{taskDefinition.timeoutFlag},#{taskDefinition.timeoutNotifyStrategy},#{taskDefinition.timeout}, #{taskDefinition.delayTime},#{taskDefinition.resourceIds},#{taskDefinition.createTime},#{taskDefinition.updateTime}) </foreach> </insert> <select id="queryDefineListPaging" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinition"> select td.id, td.code, td.name, td.version, td.description, td.project_code, td.user_id, td.task_type, td.task_params, td.flag, td.task_priority, td.worker_group, td.fail_retry_times, td.fail_retry_interval, td.timeout_flag, td.timeout_notify_strategy, td.timeout, td.delay_time, td.resource_ids, td.create_time, td.update_time, u.user_name,p.name as project_name from t_ds_task_definition td JOIN t_ds_user u ON td.user_id = u.id JOIN t_ds_project p ON td.project_code = p.code where td.project_code = #{projectCode} <if test=" taskType != null and taskType != ''"> and td.task_type = #{taskType} </if> <if test=" searchVal != null and searchVal != ''"> and td.name like concat('%', #{searchVal}, '%') </if> <if test=" userId != 0"> and td.user_id = #{userId} </if> order by td.update_time desc </select> </mapper>
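The same mismatch repeats here, and this is the exact statement Exception 1 quotes: `baseSql` reads `environment_code` from `t_ds_task_definition`, while `batchInsert` never writes it, so MySQL falls back to a column default that was never defined. Mirroring the sketch above for the non-log table (again assuming the fix just adds the missing column to the insert):

```
-- Sketch: t_ds_task_definition batchInsert aligned with its baseSql by
-- adding environment_code after worker_group (21 columns, 21 placeholders).
INSERT INTO t_ds_task_definition
    (code, `name`, version, description, project_code, user_id, task_type,
     task_params, flag, task_priority, worker_group, environment_code,
     fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy,
     timeout, delay_time, resource_ids, create_time, update_time)
VALUES (?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?, ?);
```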
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,152
[Bug][Server] Manual execution of workflow exception
**Describe the bug**

Exception 1:

```
insert into t_ds_task_definition (code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, create_time, update_time)
values (?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?)
### Cause: java.sql.SQLException: Field 'environment_code' doesn't have a default value
```

Exception 2:

```
insert into t_ds_task_instance
### Cause: java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list'
; bad SQL grammar []; nested exception is java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list'
```

Exception 3: on the Master server, the TASK_EXECUTE_RESPONSE and TASK_EXECUTE_ACK commands fail with a processor class error.
https://github.com/apache/dolphinscheduler/issues/6152
https://github.com/apache/dolphinscheduler/pull/6209
c1496d8a74ef69253d1873602a467ca61876bbfb
5fead427f227bd6261b677fff600e4f913462c5e
"2021-09-09T10:38:28Z"
java
"2021-09-16T07:07:22Z"
pom.xml
<?xml version="1.0" encoding="UTF-8"?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler</artifactId> <version>1.3.6-SNAPSHOT</version> <packaging>pom</packaging> <name>${project.artifactId}</name> <url>http://dolphinscheduler.apache.org</url> <description>Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing. </description> <licenses> <license> <name>Apache License 2.0</name> <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url> <distribution>repo</distribution> </license> </licenses> <scm> <connection>scm:git:https://github.com/apache/dolphinscheduler.git</connection> <developerConnection>scm:git:https://github.com/apache/dolphinscheduler.git</developerConnection> <url>https://github.com/apache/dolphinscheduler</url> <tag>HEAD</tag> </scm> <mailingLists> <mailingList> <name>DolphinScheduler Developer List</name> <post>dev@dolphinscheduler.apache.org</post> <subscribe>dev-subscribe@dolphinscheduler.apache.org</subscribe> <unsubscribe>dev-unsubscribe@dolphinscheduler.apache.org</unsubscribe> </mailingList> </mailingLists> <parent> <groupId>org.apache</groupId> <artifactId>apache</artifactId> <version>21</version> </parent> <properties> <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding> <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding> <curator.version>4.3.0</curator.version> <zookeeper.version>3.4.14</zookeeper.version> <spring.version>5.1.19.RELEASE</spring.version> <spring.boot.version>2.1.18.RELEASE</spring.boot.version> <java.version>1.8</java.version> <logback.version>1.2.3</logback.version> <hadoop.version>2.7.3</hadoop.version> <quartz.version>2.3.0</quartz.version> <jackson.version>2.10.5</jackson.version> <mybatis-plus.version>3.2.0</mybatis-plus.version> <mybatis.spring.version>2.0.1</mybatis.spring.version> <cron.utils.version>5.0.5</cron.utils.version> <druid.version>1.1.22</druid.version> <h2.version>1.4.200</h2.version> <commons.codec.version>1.11</commons.codec.version> <commons.logging.version>1.1.1</commons.logging.version> <httpclient.version>4.4.1</httpclient.version> <httpcore.version>4.4.1</httpcore.version> <junit.version>4.12</junit.version> <mysql.connector.version>5.1.34</mysql.connector.version> <slf4j.api.version>1.7.5</slf4j.api.version> <slf4j.log4j12.version>1.7.5</slf4j.log4j12.version> 
<commons.collections.version>3.2.2</commons.collections.version> <commons.httpclient>3.0.1</commons.httpclient> <commons.beanutils.version>1.9.4</commons.beanutils.version> <commons.configuration.version>1.10</commons.configuration.version> <commons.email.version>1.5</commons.email.version> <poi.version>4.1.2</poi.version> <javax.servlet.api.version>3.1.0</javax.servlet.api.version> <commons.collections4.version>4.1</commons.collections4.version> <guava.version>24.1-jre</guava.version> <postgresql.version>42.2.5</postgresql.version> <hive.jdbc.version>2.1.0</hive.jdbc.version> <commons.io.version>2.4</commons.io.version> <oshi.core.version>3.9.1</oshi.core.version> <clickhouse.jdbc.version>0.1.52</clickhouse.jdbc.version> <mssql.jdbc.version>6.1.0.jre8</mssql.jdbc.version> <presto.jdbc.version>0.238.1</presto.jdbc.version> <spotbugs.version>3.1.12</spotbugs.version> <checkstyle.version>3.1.2</checkstyle.version> <zookeeper.version>3.4.14</zookeeper.version> <curator.test>2.12.0</curator.test> <frontend-maven-plugin.version>1.6</frontend-maven-plugin.version> <maven-compiler-plugin.version>3.3</maven-compiler-plugin.version> <maven-assembly-plugin.version>3.1.0</maven-assembly-plugin.version> <maven-release-plugin.version>2.5.3</maven-release-plugin.version> <maven-javadoc-plugin.version>2.10.3</maven-javadoc-plugin.version> <maven-source-plugin.version>2.4</maven-source-plugin.version> <maven-surefire-plugin.version>2.22.1</maven-surefire-plugin.version> <maven-dependency-plugin.version>3.1.1</maven-dependency-plugin.version> <rpm-maven-plugion.version>2.2.0</rpm-maven-plugion.version> <jacoco.version>0.8.4</jacoco.version> <jcip.version>1.0</jcip.version> <maven.deploy.skip>false</maven.deploy.skip> <cobertura-maven-plugin.version>2.7</cobertura-maven-plugin.version> <mockito.version>2.21.0</mockito.version> <powermock.version>2.0.2</powermock.version> <servlet-api.version>2.5</servlet-api.version> <swagger.version>1.9.3</swagger.version> <springfox.version>2.9.2</springfox.version> <swagger-models.version>1.5.24</swagger-models.version> <guava-retry.version>2.0.0</guava-retry.version> <dep.airlift.version>0.184</dep.airlift.version> <dep.packaging.version>${dep.airlift.version}</dep.packaging.version> <protostuff.version>1.7.2</protostuff.version> <reflections.version>0.9.12</reflections.version> <byte-buddy.version>1.9.16</byte-buddy.version> </properties> <dependencyManagement> <dependencies> <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus-boot-starter</artifactId> <version>${mybatis-plus.version}</version> </dependency> <dependency> <groupId>com.baomidou</groupId> <artifactId>mybatis-plus</artifactId> <version>${mybatis-plus.version}</version> </dependency> <!-- quartz--> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>org.quartz-scheduler</groupId> <artifactId>quartz-jobs</artifactId> <version>${quartz.version}</version> </dependency> <dependency> <groupId>com.cronutils</groupId> <artifactId>cron-utils</artifactId> <version>${cron.utils.version}</version> </dependency> <dependency> <groupId>com.alibaba</groupId> <artifactId>druid</artifactId> <version>${druid.version}</version> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-parent</artifactId> <version>${spring.boot.version}</version> <type>pom</type> <scope>import</scope> </dependency> <dependency> <groupId>org.springframework</groupId> 
<artifactId>spring-core</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-context</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-beans</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-tx</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-jdbc</artifactId> <version>${spring.version}</version> </dependency> <dependency> <groupId>org.springframework</groupId> <artifactId>spring-test</artifactId> <version>${spring.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-standalone-server</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-common</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-registry-plugin</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-dao</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-api</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-remote</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-service</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-alert</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-spi</artifactId> <version>${project.version}</version> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-framework</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> <version>${zookeeper.version}</version> <exclusions> <exclusion> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> </exclusion> <exclusion> <artifactId>netty</artifactId> <groupId>io.netty</groupId> </exclusion> <exclusion> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-annotations</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-client</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>log4j-1.2-api</groupId> 
<artifactId>org.apache.logging.log4j</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-recipes</artifactId> <version>${curator.version}</version> <exclusions> <exclusion> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.curator</groupId> <artifactId>curator-test</artifactId> <version>${curator.test}</version> </dependency> <dependency> <groupId>commons-codec</groupId> <artifactId>commons-codec</artifactId> <version>${commons.codec.version}</version> </dependency> <dependency> <groupId>commons-logging</groupId> <artifactId>commons-logging</artifactId> <version>${commons.logging.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpclient</artifactId> <version>${httpclient.version}</version> </dependency> <dependency> <groupId>org.apache.httpcomponents</groupId> <artifactId>httpcore</artifactId> <version>${httpcore.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-annotations</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-databind</artifactId> <version>${jackson.version}</version> </dependency> <dependency> <groupId>com.fasterxml.jackson.core</groupId> <artifactId>jackson-core</artifactId> <version>${jackson.version}</version> </dependency> <!--protostuff--> <!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-core --> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-core</artifactId> <version>${protostuff.version}</version> </dependency> <!-- https://mvnrepository.com/artifact/io.protostuff/protostuff-runtime --> <dependency> <groupId>io.protostuff</groupId> <artifactId>protostuff-runtime</artifactId> <version>${protostuff.version}</version> </dependency> <dependency> <groupId>net.bytebuddy</groupId> <artifactId>byte-buddy</artifactId> <version>${byte-buddy.version}</version> </dependency> <dependency> <groupId>org.reflections</groupId> <artifactId>reflections</artifactId> <version>${reflections.version}</version> </dependency> <dependency> <groupId>junit</groupId> <artifactId>junit</artifactId> <version>${junit.version}</version> </dependency> <dependency> <groupId>org.mockito</groupId> <artifactId>mockito-core</artifactId> <version>${mockito.version}</version> <type>jar</type> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-module-junit4</artifactId> <version>${powermock.version}</version> <type>jar</type> <scope>test</scope> </dependency> <dependency> <groupId>org.powermock</groupId> <artifactId>powermock-api-mockito2</artifactId> <version>${powermock.version}</version> <type>jar</type> <scope>test</scope> <exclusions> <exclusion> <groupId>org.mockito</groupId> <artifactId>mockito-core</artifactId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.jacoco</groupId> <artifactId>org.jacoco.agent</artifactId> <version>${jacoco.version}</version> <classifier>runtime</classifier> <scope>test</scope> </dependency> <dependency> <groupId>mysql</groupId> <artifactId>mysql-connector-java</artifactId> <version>${mysql.connector.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>com.h2database</groupId> <artifactId>h2</artifactId> <version>${h2.version}</version> </dependency> <dependency> 
<groupId>org.slf4j</groupId> <artifactId>slf4j-api</artifactId> <version>${slf4j.api.version}</version> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-log4j12</artifactId> <version>${slf4j.log4j12.version}</version> </dependency> <dependency> <groupId>commons-collections</groupId> <artifactId>commons-collections</artifactId> <version>${commons.collections.version}</version> </dependency> <dependency> <groupId>commons-httpclient</groupId> <artifactId>commons-httpclient</artifactId> <version>${commons.httpclient}</version> </dependency> <dependency> <groupId>commons-beanutils</groupId> <artifactId>commons-beanutils</artifactId> <version>${commons.beanutils.version}</version> </dependency> <dependency> <groupId>commons-configuration</groupId> <artifactId>commons-configuration</artifactId> <version>${commons.configuration.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-classic</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-core</artifactId> <version>${logback.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-email</artifactId> <version>${commons.email.version}</version> </dependency> <!--excel poi--> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi</artifactId> <version>${poi.version}</version> </dependency> <dependency> <groupId>org.apache.poi</groupId> <artifactId>poi-ooxml</artifactId> <version>${poi.version}</version> </dependency> <!-- hadoop --> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-common</artifactId> <version>${hadoop.version}</version> <exclusions> <exclusion> <artifactId>slf4j-log4j12</artifactId> <groupId>org.slf4j</groupId> </exclusion> <exclusion> <artifactId>com.sun.jersey</artifactId> <groupId>jersey-json</groupId> </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-client</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-yarn-common</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-aws</artifactId> <version>${hadoop.version}</version> </dependency> <dependency> <groupId>org.apache.commons</groupId> <artifactId>commons-collections4</artifactId> <version>${commons.collections4.version}</version> </dependency> <dependency> <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> <version>${guava.version}</version> </dependency> <dependency> <groupId>org.postgresql</groupId> <artifactId>postgresql</artifactId> <version>${postgresql.version}</version> </dependency> <dependency> <groupId>org.apache.hive</groupId> <artifactId>hive-jdbc</artifactId> <version>${hive.jdbc.version}</version> </dependency> <dependency> <groupId>commons-io</groupId> <artifactId>commons-io</artifactId> <version>${commons.io.version}</version> </dependency> <dependency> <groupId>com.github.oshi</groupId> <artifactId>oshi-core</artifactId> <version>${oshi.core.version}</version> </dependency> <dependency> <groupId>ru.yandex.clickhouse</groupId> <artifactId>clickhouse-jdbc</artifactId> <version>${clickhouse.jdbc.version}</version> </dependency> <dependency> 
<groupId>com.microsoft.sqlserver</groupId> <artifactId>mssql-jdbc</artifactId> <version>${mssql.jdbc.version}</version> </dependency> <dependency> <groupId>com.facebook.presto</groupId> <artifactId>presto-jdbc</artifactId> <version>${presto.jdbc.version}</version> </dependency> <dependency> <groupId>net.jcip</groupId> <artifactId>jcip-annotations</artifactId> <version>${jcip.version}</version> <optional>true</optional> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>servlet-api</artifactId> <version>${servlet-api.version}</version> </dependency> <dependency> <groupId>javax.servlet</groupId> <artifactId>javax.servlet-api</artifactId> <version>${javax.servlet.api.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger2</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.springfox</groupId> <artifactId>springfox-swagger-ui</artifactId> <version>${springfox.version}</version> </dependency> <dependency> <groupId>io.swagger</groupId> <artifactId>swagger-models</artifactId> <version>${swagger-models.version}</version> </dependency> <dependency> <groupId>com.github.xiaoymin</groupId> <artifactId>swagger-bootstrap-ui</artifactId> <version>${swagger.version}</version> </dependency> <dependency> <groupId>com.github.rholder</groupId> <artifactId>guava-retrying</artifactId> <version>${guava-retry.version}</version> </dependency> <dependency> <groupId>org.sonatype.aether</groupId> <artifactId>aether-api</artifactId> <version>1.13.1</version> </dependency> <dependency> <groupId>io.airlift.resolver</groupId> <artifactId>resolver</artifactId> <version>1.5</version> </dependency> <dependency> <groupId>org.ow2.asm</groupId> <artifactId>asm</artifactId> <version>6.2.1</version> </dependency> <dependency> <groupId>javax.activation</groupId> <artifactId>activation</artifactId> <version>1.1</version> </dependency> <dependency> <groupId>com.sun.mail</groupId> <artifactId>javax.mail</artifactId> <version>1.6.2</version> </dependency> </dependencies> </dependencyManagement> <build> <finalName>apache-dolphinscheduler-${project.version}</finalName> <pluginManagement> <plugins> <plugin> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-maven-plugin</artifactId> <version>1.0.0</version> <extensions>true</extensions> </plugin> <plugin> <groupId>ca.vanzyl.maven.plugins</groupId> <artifactId>provisio-maven-plugin</artifactId> <version>1.0.4</version> <extensions>true</extensions> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>rpm-maven-plugin</artifactId> <version>${rpm-maven-plugion.version}</version> <inherited>false</inherited> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <configuration> <source>${java.version}</source> <target>${java.version}</target> <testSource>${java.version}</testSource> <testTarget>${java.version}</testTarget> </configuration> <version>${maven-compiler-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <tagNameFormat>@{project.version}</tagNameFormat> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-assembly-plugin</artifactId> 
<version>${maven-assembly-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> <configuration> <source>8</source> <failOnError>false</failOnError> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-source-plugin</artifactId> <version>${maven-source-plugin.version}</version> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-dependency-plugin</artifactId> <version>${maven-dependency-plugin.version}</version> </plugin> </plugins> </pluginManagement> <plugins> <plugin> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-maven-plugin</artifactId> <extensions>true</extensions> <!--<configuration>--> <!--<allowedProvidedDependencies>--> <!--<allowedProvidedDependency>org.apache.dolphinscheduler:dolphinscheduler-common</allowedProvidedDependency>--> <!--</allowedProvidedDependencies>--> <!--</configuration>--> </plugin> <plugin> <groupId>ca.vanzyl.maven.plugins</groupId> <artifactId>provisio-maven-plugin</artifactId> <extensions>true</extensions> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-source-plugin</artifactId> <executions> <execution> <id>attach-sources</id> <phase>verify</phase> <goals> <goal>jar-no-fork</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-javadoc-plugin</artifactId> <version>${maven-javadoc-plugin.version}</version> <executions> <execution> <id>attach-javadocs</id> <goals> <goal>jar</goal> </goals> </execution> </executions> <configuration> <aggregate>true</aggregate> <charset>${project.build.sourceEncoding}</charset> <encoding>${project.build.sourceEncoding}</encoding> <docencoding>${project.build.sourceEncoding}</docencoding> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-release-plugin</artifactId> <version>${maven-release-plugin.version}</version> <configuration> <autoVersionSubmodules>true</autoVersionSubmodules> <tagNameFormat>@{project.version}</tagNameFormat> <tagBase>${project.version}</tagBase> <!--<goals>-f pom.xml deploy</goals>--> </configuration> <dependencies> <dependency> <groupId>org.apache.maven.scm</groupId> <artifactId>maven-scm-provider-jgit</artifactId> <version>1.9.5</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <version>${maven-compiler-plugin.version}</version> <configuration> <source>${java.version}</source> <target>${java.version}</target> <encoding>${project.build.sourceEncoding}</encoding> <skip>false</skip><!--not skip compile test classes--> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-surefire-plugin</artifactId> <version>${maven-surefire-plugin.version}</version> <configuration> <systemPropertyVariables> <jacoco-agent.destfile>${project.build.directory}/jacoco.exec</jacoco-agent.destfile> </systemPropertyVariables> <includes> <!--registry plugin --> <include>**/plugin/registry/zookeeper/ZookeeperRegistryTest.java</include> <!-- API --> <include>**/api/controller/ProjectControllerTest.java</include> <include>**/api/controller/QueueControllerTest.java</include> <include>**/api/configuration/TrafficConfigurationTest.java</include> <include>**/api/controller/ProcessDefinitionControllerTest.java</include> 
<include>**/api/controller/TenantControllerTest.java</include> <include>**/api/controller/SchedulerControllerTest.java</include> <include>**/api/dto/resources/filter/ResourceFilterTest.java</include> <include>**/api/dto/resources/visitor/ResourceTreeVisitorTest.java</include> <include>**/api/enums/testGetEnum.java</include> <include>**/api/enums/StatusTest.java</include> <include>**/api/exceptions/ApiExceptionHandlerTest.java</include> <include>**/api/exceptions/ServiceExceptionTest.java</include> <include>**/api/interceptor/LocaleChangeInterceptorTest.java</include> <include>**/api/interceptor/LoginHandlerInterceptorTest.java</include> <include>**/api/interceptor/RateLimitInterceptorTest.java</include> <include>**/api/security/impl/pwd/PasswordAuthenticatorTest.java</include> <include>**/api/security/impl/ldap/LdapAuthenticatorTest.java</include> <include>**/api/security/SecurityConfigLDAPTest.java</include> <include>**/api/security/SecurityConfigPasswordTest.java</include> <include>**/api/service/AccessTokenServiceTest.java</include> <include>**/api/service/AlertGroupServiceTest.java</include> <include>**/api/service/BaseDAGServiceTest.java</include> <include>**/api/service/BaseServiceTest.java</include> <include>**/api/service/DataAnalysisServiceTest.java</include> <include>**/api/service/AlertPluginInstanceServiceTest.java</include> <include>**/api/service/DataSourceServiceTest.java</include> <include>**/api/service/ExecutorService2Test.java</include> <include>**/api/service/ExecutorServiceTest.java</include> <include>**/api/service/LoggerServiceTest.java</include> <include>**/api/service/MonitorServiceTest.java</include> <include>**/api/service/ProcessDefinitionServiceTest.java</include> <include>**/api/service/ProcessTaskRelationServiceImplTest.java</include> <include>**/api/service/TaskDefinitionServiceImplTest.java</include> <include>**/api/service/ProcessInstanceServiceTest.java</include> <include>**/api/service/ProjectServiceTest.java</include> <include>**/api/service/QueueServiceTest.java</include> <include>**/api/service/ResourcesServiceTest.java</include> <include>**/api/service/SchedulerServiceTest.java</include> <include>**/api/service/SessionServiceTest.java</include> <include>**/api/service/TaskInstanceServiceTest.java</include> <include>**/api/service/TenantServiceTest.java</include> <include>**/api/service/UdfFuncServiceTest.java</include> <include>**/api/service/UiPluginServiceTest.java</include> <include>**/api/service/UserAlertGroupServiceTest.java</include> <include>**/api/service/UsersServiceTest.java</include> <include>**/api/service/WorkerGroupServiceTest.java</include> <include>**/api/service/WorkFlowLineageServiceTest.java</include> <include>**/api/service/EnvironmentServiceTest.java</include> <include>**/api/service/EnvironmentWorkerGroupRelationServiceTest.java</include> <include>**/api/controller/TaskInstanceControllerTest.java</include> <include>**/api/controller/WorkFlowLineageControllerTest.java</include> <include>**/api/controller/EnvironmentControllerTest.java</include> <include>**/api/utils/exportprocess/DataSourceParamTest.java</include> <include>**/api/utils/exportprocess/DependentParamTest.java</include> <include>**/api/utils/CheckUtilsTest.java</include> <include>**/api/utils/FileUtilsTest.java</include> 
<include>**/api/utils/ResultTest.java</include> <include>**/common/graph/DAGTest.java</include> <include>**/common/os/OshiTest.java</include> <include>**/common/shell/ShellExecutorTest.java</include> <include>**/common/task/DataxParametersTest.java</include> <include>**/common/task/EntityTestUtils.java</include> <include>**/common/task/FlinkParametersTest.java</include> <include>**/common/task/HttpParametersTest.java</include> <include>**/common/task/SparkParametersTest.java</include> <include>**/common/task/SqlParametersTest.java</include> <include>**/common/task/SqoopParameterEntityTest.java</include> <include>**/common/threadutils/ThreadPoolExecutorsTest.java</include> <include>**/common/threadutils/ThreadUtilsTest.java</include> <include>**/common/utils/CollectionUtilsTest.java</include> <include>**/common/utils/CommonUtilsTest.java</include> <include>**/common/utils/DateUtilsTest.java</include> <include>**/common/utils/DependentUtilsTest.java</include> <include>**/common/utils/EncryptionUtilsTest.java</include> <include>**/common/utils/FileUtilsTest.java</include> <include>**/common/utils/JSONUtilsTest.java</include> <include>**/common/utils/LoggerUtilsTest.java</include> <include>**/common/utils/NetUtilsTest.java</include> <include>**/common/utils/ParameterUtilsTest.java</include> <include>**/common/utils/TimePlaceholderUtilsTest.java</include> <include>**/common/utils/PreconditionsTest.java</include> <include>**/common/utils/PropertyUtilsTest.java</include> <include>**/common/utils/SchemaUtilsTest.java</include> <include>**/common/utils/ScriptRunnerTest.java</include> <include>**/common/utils/SensitiveLogUtilsTest.java</include> <include>**/common/utils/StringTest.java</include> <include>**/common/utils/StringUtilsTest.java</include> <include>**/common/utils/TaskParametersUtilsTest.java</include> <include>**/common/utils/VarPoolUtilsTest.java</include> <include>**/common/utils/HadoopUtilsTest.java</include> <include>**/common/utils/HttpUtilsTest.java</include> <include>**/common/utils/KerberosHttpClientTest.java</include> <include>**/common/utils/HiveConfUtilsTest.java</include> <include>**/common/ConstantsTest.java</include> <include>**/common/utils/HadoopUtils.java</include> <include>**/common/utils/RetryerUtilsTest.java</include> <include>**/common/datasource/clickhouse/ClickHouseDatasourceProcessorTest.java</include> <include>**/common/datasource/db2/Db2DatasourceProcessorTest.java</include> <include>**/common/datasource/hive/HiveDatasourceProcessorTest.java</include> <include>**/common/datasource/mysql/MysqlDatasourceProcessorTest.java</include> <include>**/common/datasource/oracle/OracleDatasourceProcessorTest.java</include> <include>**/common/datasource/postgresql/PostgreSqlDatasourceProcessorTest.java</include> <include>**/common/datasource/presto/PrestoDatasourceProcessorTest.java</include> <include>**/common/datasource/spark/SparkDatasourceProcessorTest.java</include> <include>**/common/datasource/sqlserver/SqlServerDatasourceProcessorTest.java</include> <include>**/common/datasource/DatasourceUtilTest.java</include> <include>**/common/enums/ExecutionStatusTest</include> <include>**/dao/mapper/AccessTokenMapperTest.java</include> <include>**/dao/mapper/AlertGroupMapperTest.java</include> <include>**/dao/mapper/CommandMapperTest.java</include> <include>**/dao/mapper/ConnectionFactoryTest.java</include> <include>**/dao/mapper/DataSourceMapperTest.java</include> <include>**/dao/datasource/MySQLDataSourceTest.java</include> 
<include>**/dao/entity/TaskInstanceTest.java</include> <include>**/dao/entity/UdfFuncTest.java</include> <include>**/remote/command/alert/AlertSendRequestCommandTest.java</include> <include>**/remote/command/alert/AlertSendResponseCommandTest.java</include> <include>**/remote/command/future/ResponseFutureTest.java</include> <include>**/remote/command/log/RemoveTaskLogRequestCommandTest.java</include> <include>**/remote/command/log/RemoveTaskLogResponseCommandTest.java</include> <include>**/remote/command/log/GetLogBytesRequestCommandTest.java</include> <include>**/remote/command/log/GetLogBytesResponseCommandTest.java</include> <include>**/remote/command/log/ViewLogRequestCommandTest.java</include> <include>**/remote/utils/HostTest.java</include> <include>**/remote/utils/NettyUtilTest.java</include> <include>**/remote/NettyRemotingClientTest.java</include> <include>**/rpc/RpcTest.java</include> <include>**/server/log/LoggerServerTest.java</include> <include>**/server/entity/SQLTaskExecutionContextTest.java</include> <include>**/server/log/MasterLogFilterTest.java</include> <include>**/server/log/SensitiveDataConverterTest.java</include> <include>**/server/log/LoggerRequestProcessorTest.java</include> <!--<include>**/server/log/TaskLogDiscriminatorTest.java</include>--> <include>**/server/log/TaskLogFilterTest.java</include> <include>**/server/log/WorkerLogFilterTest.java</include> <include>**/server/master/cache/impl/TaskInstanceCacheManagerImplTest.java</include> <include>**/server/master/config/MasterConfigTest.java</include> <include>**/server/master/consumer/TaskPriorityQueueConsumerTest.java</include> <include>**/server/master/runner/MasterTaskExecThreadTest.java</include> <!--<include>**/server/master/dispatch/executor/NettyExecutorManagerTest.java</include>--> <include>**/server/master/dispatch/host/assign/LowerWeightRoundRobinTest.java</include> <include>**/server/master/dispatch/host/assign/RandomSelectorTest.java</include> <include>**/server/master/dispatch/host/assign/RoundRobinSelectorTest.java</include> <include>**/server/master/dispatch/host/assign/HostWorkerTest.java</include> <include>**/server/master/registry/MasterRegistryClientTest.java</include> <include>**/server/master/registry/ServerNodeManagerTest.java</include> <include>**/server/master/dispatch/host/assign/RoundRobinHostManagerTest.java</include> <include>**/server/master/MasterCommandTest.java</include> <include>**/server/master/DependentTaskTest.java</include> <include>**/server/master/ConditionsTaskTest.java</include> <include>**/server/master/SwitchTaskTest.java</include> <include>**/server/master/MasterExecThreadTest.java</include> <include>**/server/master/ParamsTest.java</include> <include>**/server/master/SubProcessTaskTest.java</include> <include>**/server/master/processor/TaskAckProcessorTest.java</include> <include>**/server/master/processor/TaskKillResponseProcessorTest.java</include> <include>**/server/master/processor/queue/TaskResponseServiceTest.java</include> <include>**/server/master/zk/ZKMasterClientTest.java</include> <include>**/server/registry/ZookeeperRegistryCenterTest.java</include> <include>**/server/utils/DataxUtilsTest.java</include> <include>**/server/utils/ExecutionContextTestUtils.java</include> <include>**/server/utils/FlinkArgsUtilsTest.java</include> <include>**/server/utils/LogUtilsTest.java</include> <include>**/server/utils/MapReduceArgsUtilsTest.java</include> <include>**/server/utils/ParamUtilsTest.java</include> <include>**/server/utils/ProcessUtilsTest.java</include> 
<include>**/server/utils/SparkArgsUtilsTest.java</include> <include>**/server/worker/processor/TaskCallbackServiceTest.java</include> <include>**/server/worker/processor/TaskExecuteProcessorTest.java</include> <include>**/server/worker/registry/WorkerRegistryTest.java</include> <include>**/server/worker/shell/ShellCommandExecutorTest.java</include> <include>**/server/worker/sql/SqlExecutorTest.java</include> <include>**/server/worker/task/spark/SparkTaskTest.java</include> <include>**/server/worker/task/datax/DataxTaskTest.java</include> <!--<include>**/server/worker/task/http/HttpTaskTest.java</include>--> <include>**/server/worker/task/sqoop/SqoopTaskTest.java</include> <include>**/server/worker/task/processdure/ProcedureTaskTest.java</include> <include>**/server/worker/task/shell/ShellTaskTest.java</include> <include>**/server/worker/task/TaskManagerTest.java</include> <include>**/server/worker/task/PythonCommandExecutorTest.java</include> <include>**/server/worker/task/TaskParamsTest.java</include> <include>**/server/worker/task/ShellTaskReturnTest.java</include> <include>**/server/worker/task/sql/SqlTaskTest.java</include> <include>**/server/worker/runner/TaskExecuteThreadTest.java</include> <include>**/server/worker/runner/WorkerManagerThreadTest.java</include> <include>**/service/quartz/cron/CronUtilsTest.java</include> <include>**/service/process/ProcessServiceTest.java</include> <include>**/service/registry/RegistryClientTest.java</include> <include>**/service/registry/RegistryPluginTest.java</include> <include>**/service/queue/TaskUpdateQueueTest.java</include> <include>**/service/queue/PeerTaskInstancePriorityQueueTest.java</include> <include>**/service/log/LogClientServiceTest.java</include> <include>**/service/alert/AlertClientServiceTest.java</include> <include>**/service/alert/ProcessAlertManagerTest.java</include> <include>**/dao/mapper/DataSourceUserMapperTest.java</include> <!--<include>**/dao/mapper/ErrorCommandMapperTest.java</include>--> <include>**/dao/mapper/ProcessDefinitionMapperTest.java</include> <include>**/dao/mapper/ProcessInstanceMapMapperTest.java</include> <include>**/dao/mapper/ProcessInstanceMapperTest.java</include> <include>**/dao/mapper/ProjectMapperTest.java</include> <include>**/dao/mapper/ProjectUserMapperTest.java</include> <include>**/dao/mapper/QueueMapperTest.java</include> <include>**/dao/mapper/ResourceUserMapperTest.java</include> <include>**/dao/mapper/ScheduleMapperTest.java</include> <include>**/dao/mapper/SessionMapperTest.java</include> <include>**/dao/mapper/TaskInstanceMapperTest.java</include> <include>**/dao/mapper/TenantMapperTest.java</include> <include>**/dao/mapper/UdfFuncMapperTest.java</include> <include>**/dao/mapper/UDFUserMapperTest.java</include> <include>**/dao/mapper/UserMapperTest.java</include> <include>**/dao/mapper/AlertPluginInstanceMapperTest.java</include> <include>**/dao/mapper/PluginDefineTest.java</include> <include>**/dao/utils/DagHelperTest.java</include> <include>**/dao/AlertDaoTest.java</include> <include>**/dao/datasource/OracleDataSourceTest.java</include> <include>**/dao/datasource/HiveDataSourceTest.java</include> <include>**/dao/datasource/BaseDataSourceTest.java</include> <include>**/dao/upgrade/ProcessDefinitionDaoTest.java</include> <include>**/dao/upgrade/WokrerGrouopDaoTest.java</include> <include>**/dao/upgrade/UpgradeDaoTest.java</include> 
<include>**/plugin/alert/email/EmailAlertChannelFactoryTest.java</include> <include>**/plugin/alert/email/EmailAlertChannelTest.java</include> <include>**/plugin/alert/email/ExcelUtilsTest.java</include> <include>**/plugin/alert/email/template/DefaultHTMLTemplateTest.java</include> <include>**/plugin/alert/dingtalk/DingTalkSenderTest.java</include> <include>**/plugin/alert/dingtalk/DingTalkAlertChannelFactoryTest.java</include> <include>**/plugin/alert/wechat/WeChatSenderTest.java</include> <include>**/plugin/alert/wechat/WeChatAlertChannelFactoryTest.java</include> <include>**/plugin/alert/script/ProcessUtilsTest.java</include> <include>**/plugin/alert/script/ScriptAlertChannelFactoryTest.java</include> <include>**/plugin/alert/script/ScriptSenderTest.java</include> <include>**/plugin/alert/http/HttpAlertChannelFactoryTest.java</include> <include>**/plugin/alert/http/HttpAlertChannelTest.java</include> <include>**/plugin/alert/feishu/FeiShuAlertChannelFactoryTest.java</include> <include>**/plugin/alert/feishu/FeiShuSenderTest.java</include> <include>**/plugin/alert/http/HttpAlertPluginTest.java</include> <include>**/plugin/alert/http/HttpSenderTest.java</include> <include>**/plugin/alert/slack/SlackAlertChannelFactoryTest.java</include> <include>**/plugin/alert/slack/SlackAlertPluginTest.java</include> <include>**/plugin/alert/slack/SlackSenderTest.java</include> <include>**/spi/params/PluginParamsTransferTest.java</include> <include>**/spi/plugin/DolphinSchedulerPluginLoaderTest.java</include> <include>**/alert/plugin/EmailAlertPluginTest.java</include> <include>**/alert/plugin/AlertPluginManagerTest.java</include> <include>**/alert/plugin/DolphinPluginLoaderTest.java</include> <include>**/alert/utils/FuncUtilsTest.java</include> <include>**/alert/processor/AlertRequestProcessorTest.java</include> <include>**/alert/runner/AlertSenderTest.java</include> <include>**/alert/AlertServerTest.java</include> <include>**/plugin/task/tis/TISTaskTest.java</include> </includes> <!-- <skip>true</skip> --> </configuration> </plugin> <!-- jenkins plugin jacoco report--> <plugin> <groupId>org.jacoco</groupId> <artifactId>jacoco-maven-plugin</artifactId> <version>${jacoco.version}</version> <configuration> <dataFile>${project.build.directory}/jacoco.exec</dataFile> </configuration> <executions> <execution> <id>default-instrument</id> <goals> <goal>instrument</goal> </goals> </execution> <execution> <id>default-restore-instrumented-classes</id> <goals> <goal>restore-instrumented-classes</goal> </goals> </execution> <execution> <id>default-report</id> <goals> <goal>report</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs-maven-plugin</artifactId> <version>${spotbugs.version}</version> <configuration> <xmlOutput>true</xmlOutput> <threshold>medium</threshold> <effort>default</effort> <excludeFilterFile>dev-config/spotbugs-exclude.xml</excludeFilterFile> <failOnError>true</failOnError> </configuration> <dependencies> <dependency> <groupId>com.github.spotbugs</groupId> <artifactId>spotbugs</artifactId> <version>4.0.0-beta4</version> </dependency> </dependencies> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-checkstyle-plugin</artifactId> <version>${checkstyle.version}</version> <dependencies> <dependency> <groupId>com.puppycrawl.tools</groupId> <artifactId>checkstyle</artifactId> <version>8.45</version> </dependency> </dependencies> <configuration> <consoleOutput>true</consoleOutput> 
<encoding>UTF-8</encoding> <configLocation>style/checkstyle.xml</configLocation> <failOnViolation>true</failOnViolation> <violationSeverity>warning</violationSeverity> <includeTestSourceDirectory>true</includeTestSourceDirectory> <sourceDirectories> <sourceDirectory>${project.build.sourceDirectory}</sourceDirectory> </sourceDirectories> <excludes>**\/generated-sources\/</excludes> </configuration> <executions> <execution> <phase>compile</phase> <goals> <goal>check</goal> </goals> </execution> </executions> </plugin> <plugin> <groupId>org.codehaus.mojo</groupId> <artifactId>cobertura-maven-plugin</artifactId> <version>${cobertura-maven-plugin.version}</version> <configuration> <check> </check> <aggregate>true</aggregate> <outputDirectory>./target/cobertura</outputDirectory> <encoding>${project.build.sourceEncoding}</encoding> <quiet>true</quiet> <format>xml</format> <instrumentation> <ignoreTrivial>true</ignoreTrivial> </instrumentation> </configuration> </plugin> </plugins> </build> <modules> <module>dolphinscheduler-spi</module> <module>dolphinscheduler-alert-plugin</module> <module>dolphinscheduler-registry-plugin</module> <module>dolphinscheduler-task-plugin</module> <module>dolphinscheduler-ui</module> <module>dolphinscheduler-server</module> <module>dolphinscheduler-common</module> <module>dolphinscheduler-api</module> <module>dolphinscheduler-dao</module> <module>dolphinscheduler-alert</module> <module>dolphinscheduler-dist</module> <module>dolphinscheduler-remote</module> <module>dolphinscheduler-service</module> <module>dolphinscheduler-microbench</module> <module>dolphinscheduler-standalone-server</module> </modules> </project>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,152
[Bug][Server] Manual execution of workflow exception
**Describe the bug** Exception 1: ``` insert into t_ds_task_definition (code, `name`, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, create_time, update_time) values (?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?, ?,?,?,?) ### Cause: java.sql.SQLException: Field 'environment_code' doesn't have a default value ``` Exception 2: ``` insert into t_ds_task_instance ### Cause: java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list' ; bad SQL grammar []; nested exception is java.sql.SQLSyntaxErrorException: Unknown column 'environment_code' in 'field list' ``` Exception 3: the Master server uses the wrong processor classes for the TASK_EXECUTE_RESPONSE and TASK_EXECUTE_ACK commands.
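Taken together, the two stack traces describe an incomplete MySQL schema migration: `t_ds_task_definition` already has `environment_code` but without a default (so an INSERT that omits it fails), while `t_ds_task_instance` lacks the column entirely. Below is a minimal sketch of the upgrade DDL those errors imply, assuming the `bigint DEFAULT -1` convention used by the PostgreSQL schema later in this record; the actual migration shipped in the linked PR may differ:

```sql
-- Hypothetical MySQL upgrade DDL implied by the exceptions above.
-- Exception 1: the column exists but has no default, so INSERTs that omit it fail.
ALTER TABLE t_ds_task_definition
    MODIFY COLUMN environment_code bigint(20) DEFAULT -1;

-- Exception 2: the column is missing entirely from the task instance table.
ALTER TABLE t_ds_task_instance
    ADD COLUMN environment_code bigint(20) DEFAULT -1;
```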
https://github.com/apache/dolphinscheduler/issues/6152
https://github.com/apache/dolphinscheduler/pull/6209
c1496d8a74ef69253d1873602a467ca61876bbfb
5fead427f227bd6261b677fff600e4f913462c5e
"2021-09-09T10:38:28Z"
java
"2021-09-16T07:07:22Z"
sql/dolphinscheduler_postgre.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS; DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS; DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE; DROP TABLE IF EXISTS QRTZ_LOCKS; DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS; DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS; DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS; DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS; DROP TABLE IF EXISTS QRTZ_TRIGGERS; DROP TABLE IF EXISTS QRTZ_JOB_DETAILS; DROP TABLE IF EXISTS QRTZ_CALENDARS; CREATE TABLE QRTZ_JOB_DETAILS( SCHED_NAME character varying(120) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, JOB_CLASS_NAME character varying(250) NOT NULL, IS_DURABLE boolean NOT NULL, IS_NONCONCURRENT boolean NOT NULL, IS_UPDATE_DATA boolean NOT NULL, REQUESTS_RECOVERY boolean NOT NULL, JOB_DATA bytea NULL); alter table QRTZ_JOB_DETAILS add primary key(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, JOB_NAME character varying(200) NOT NULL, JOB_GROUP character varying(200) NOT NULL, DESCRIPTION character varying(250) NULL, NEXT_FIRE_TIME BIGINT NULL, PREV_FIRE_TIME BIGINT NULL, PRIORITY INTEGER NULL, TRIGGER_STATE character varying(16) NOT NULL, TRIGGER_TYPE character varying(8) NOT NULL, START_TIME BIGINT NOT NULL, END_TIME BIGINT NULL, CALENDAR_NAME character varying(200) NULL, MISFIRE_INSTR SMALLINT NULL, JOB_DATA bytea NULL) ; alter table QRTZ_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, REPEAT_COUNT BIGINT NOT NULL, REPEAT_INTERVAL BIGINT NOT NULL, TIMES_TRIGGERED BIGINT NOT NULL) ; alter table QRTZ_SIMPLE_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, CRON_EXPRESSION character varying(120) NOT NULL, TIME_ZONE_ID character varying(80)) ; alter table QRTZ_CRON_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, STR_PROP_1 character varying(512) NULL, STR_PROP_2 character varying(512) NULL, STR_PROP_3 character varying(512) NULL, INT_PROP_1 INT NULL, INT_PROP_2 INT NULL, LONG_PROP_1 BIGINT NULL, LONG_PROP_2 BIGINT NULL, DEC_PROP_1 NUMERIC(13,4) NULL, DEC_PROP_2 NUMERIC(13,4) NULL, 
BOOL_PROP_1 boolean NULL, BOOL_PROP_2 boolean NULL) ; alter table QRTZ_SIMPROP_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, BLOB_DATA bytea NULL) ; alter table QRTZ_BLOB_TRIGGERS add primary key(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME character varying(120) NOT NULL, CALENDAR_NAME character varying(200) NOT NULL, CALENDAR bytea NOT NULL) ; alter table QRTZ_CALENDARS add primary key(SCHED_NAME,CALENDAR_NAME); CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME character varying(120) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL) ; alter table QRTZ_PAUSED_TRIGGER_GRPS add primary key(SCHED_NAME,TRIGGER_GROUP); CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME character varying(120) NOT NULL, ENTRY_ID character varying(200) NOT NULL, TRIGGER_NAME character varying(200) NOT NULL, TRIGGER_GROUP character varying(200) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, FIRED_TIME BIGINT NOT NULL, SCHED_TIME BIGINT NOT NULL, PRIORITY INTEGER NOT NULL, STATE character varying(16) NOT NULL, JOB_NAME character varying(200) NULL, JOB_GROUP character varying(200) NULL, IS_NONCONCURRENT boolean NULL, REQUESTS_RECOVERY boolean NULL) ; alter table QRTZ_FIRED_TRIGGERS add primary key(SCHED_NAME,ENTRY_ID); CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME character varying(120) NOT NULL, INSTANCE_NAME character varying(200) NOT NULL, LAST_CHECKIN_TIME BIGINT NOT NULL, CHECKIN_INTERVAL BIGINT NOT NULL) ; alter table QRTZ_SCHEDULER_STATE add primary key(SCHED_NAME,INSTANCE_NAME); CREATE TABLE QRTZ_LOCKS ( SCHED_NAME character varying(120) NOT NULL, LOCK_NAME character varying(40) NOT NULL) ; alter table QRTZ_LOCKS add primary key(SCHED_NAME,LOCK_NAME); CREATE INDEX IDX_QRTZ_J_REQ_RECOVERY ON QRTZ_JOB_DETAILS(SCHED_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_J_GRP ON QRTZ_JOB_DETAILS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_J ON QRTZ_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_JG ON QRTZ_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_T_C ON QRTZ_TRIGGERS(SCHED_NAME,CALENDAR_NAME); CREATE INDEX IDX_QRTZ_T_G ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_T_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_N_G_STATE ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NEXT_FIRE_TIME ON QRTZ_TRIGGERS(SCHED_NAME,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST ON QRTZ_TRIGGERS(SCHED_NAME,TRIGGER_STATE,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_T_NFT_ST_MISFIRE_GRP ON QRTZ_TRIGGERS(SCHED_NAME,MISFIRE_INSTR,NEXT_FIRE_TIME,TRIGGER_GROUP,TRIGGER_STATE); CREATE INDEX IDX_QRTZ_FT_TRIG_INST_NAME ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME); CREATE INDEX IDX_QRTZ_FT_INST_JOB_REQ_RCVRY ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,INSTANCE_NAME,REQUESTS_RECOVERY); CREATE INDEX IDX_QRTZ_FT_J_G ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_JG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,JOB_GROUP); CREATE INDEX IDX_QRTZ_FT_T_G ON 
QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_NAME,TRIGGER_GROUP); CREATE INDEX IDX_QRTZ_FT_TG ON QRTZ_FIRED_TRIGGERS(SCHED_NAME,TRIGGER_GROUP); -- -- Table structure for table t_ds_access_token -- DROP TABLE IF EXISTS t_ds_access_token; CREATE TABLE t_ds_access_token ( id int NOT NULL , user_id int DEFAULT NULL , token varchar(64) DEFAULT NULL , expire_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alert -- DROP TABLE IF EXISTS t_ds_alert; CREATE TABLE t_ds_alert ( id int NOT NULL , title varchar(64) DEFAULT NULL , content text , alert_status int DEFAULT '0' , log text , alertgroup_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_alertgroup -- DROP TABLE IF EXISTS t_ds_alertgroup; CREATE TABLE t_ds_alertgroup( id int NOT NULL, alert_instance_ids varchar (255) DEFAULT NULL, create_user_id int4 DEFAULT NULL, group_name varchar(255) DEFAULT NULL, description varchar(255) DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT t_ds_alertgroup_name_un UNIQUE (group_name) ) ; -- -- Table structure for table t_ds_command -- DROP TABLE IF EXISTS t_ds_command; CREATE TABLE t_ds_command ( id int NOT NULL , command_type int DEFAULT NULL , process_definition_code bigint NOT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , executor_id int DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT '-1', PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_datasource -- DROP TABLE IF EXISTS t_ds_datasource; CREATE TABLE t_ds_datasource ( id int NOT NULL , name varchar(64) NOT NULL , note varchar(255) DEFAULT NULL , type int NOT NULL , user_id int NOT NULL , connection_params text NOT NULL , create_time timestamp NOT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id), CONSTRAINT t_ds_datasource_name_un UNIQUE (name, type) ) ; -- -- Table structure for table t_ds_error_command -- DROP TABLE IF EXISTS t_ds_error_command; CREATE TABLE t_ds_error_command ( id int NOT NULL , command_type int DEFAULT NULL , executor_id int DEFAULT NULL , process_definition_code bigint NOT NULL , command_param text , task_depend_type int DEFAULT NULL , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT '-1', message text , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_master_server -- -- -- Table structure for table t_ds_process_definition -- DROP TABLE IF EXISTS t_ds_process_definition; CREATE TABLE t_ds_process_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , 
create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT process_definition_unique UNIQUE (name, project_code) ) ; create index process_definition_index on t_ds_process_definition (code,id); DROP TABLE IF EXISTS t_ds_process_definition_log; CREATE TABLE t_ds_process_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , release_state int DEFAULT NULL , user_id int DEFAULT NULL , global_params text , locations text , warning_group_id int DEFAULT NULL , flag int DEFAULT NULL , timeout int DEFAULT '0' , tenant_id int DEFAULT '-1' , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_task_definition; CREATE TABLE t_ds_task_definition ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , environment_code bigint DEFAULT '-1', fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , resource_ids varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT task_definition_unique UNIQUE (name, project_code) ) ; create index task_definition_index on t_ds_task_definition (project_code,id); DROP TABLE IF EXISTS t_ds_task_definition_log; CREATE TABLE t_ds_task_definition_log ( id int NOT NULL , code bigint NOT NULL, name varchar(255) DEFAULT NULL , version int DEFAULT NULL , description text , project_code bigint DEFAULT NULL , user_id int DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_params text , flag int DEFAULT NULL , task_priority int DEFAULT NULL , worker_group varchar(255) DEFAULT NULL , environment_code bigint DEFAULT '-1', fail_retry_times int DEFAULT NULL , fail_retry_interval int DEFAULT NULL , timeout_flag int DEFAULT NULL , timeout_notify_strategy int DEFAULT NULL , timeout int DEFAULT '0' , delay_time int DEFAULT '0' , resource_ids varchar(255) DEFAULT NULL , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_process_task_relation; CREATE TABLE t_ds_process_task_relation ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , pre_task_code bigint DEFAULT NULL , pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , condition_params text , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP TABLE IF EXISTS t_ds_process_task_relation_log; CREATE TABLE t_ds_process_task_relation_log ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , project_code bigint DEFAULT NULL , process_definition_code bigint DEFAULT NULL , pre_task_code bigint DEFAULT NULL , 
pre_task_version int DEFAULT '0' , post_task_code bigint DEFAULT NULL , post_task_version int DEFAULT '0' , condition_type int DEFAULT NULL , condition_params text , operator int DEFAULT NULL , operate_time timestamp DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_process_instance -- DROP TABLE IF EXISTS t_ds_process_instance; CREATE TABLE t_ds_process_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , process_definition_version int DEFAULT NULL , process_definition_code bigint DEFAULT NULL , state int DEFAULT NULL , recovery int DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , run_times int DEFAULT NULL , host varchar(135) DEFAULT NULL , command_type int DEFAULT NULL , command_param text , task_depend_type int DEFAULT NULL , max_try_times int DEFAULT '0' , failure_strategy int DEFAULT '0' , warning_type int DEFAULT '0' , warning_group_id int DEFAULT NULL , schedule_time timestamp DEFAULT NULL , command_start_time timestamp DEFAULT NULL , global_params text , process_instance_json text , flag int DEFAULT '1' , update_time timestamp NULL , is_sub_process int DEFAULT '0' , executor_id int NOT NULL , history_cmd text , dependence_schedule_times text , process_instance_priority int DEFAULT NULL , worker_group varchar(64) , environment_code bigint DEFAULT '-1', timeout int DEFAULT '0' , tenant_id int NOT NULL DEFAULT '-1' , var_pool text , PRIMARY KEY (id) ) ; create index process_instance_index on t_ds_process_instance (process_definition_code,id); create index start_time_index on t_ds_process_instance (start_time); -- -- Table structure for table t_ds_project -- DROP TABLE IF EXISTS t_ds_project; CREATE TABLE t_ds_project ( id int NOT NULL , name varchar(100) DEFAULT NULL , code bigint NOT NULL, description varchar(200) DEFAULT NULL , user_id int DEFAULT NULL , flag int DEFAULT '1' , create_time timestamp DEFAULT CURRENT_TIMESTAMP , update_time timestamp DEFAULT CURRENT_TIMESTAMP , PRIMARY KEY (id) ) ; create index user_id_index on t_ds_project (user_id); -- -- Table structure for table t_ds_queue -- DROP TABLE IF EXISTS t_ds_queue; CREATE TABLE t_ds_queue ( id int NOT NULL , queue_name varchar(64) DEFAULT NULL , queue varchar(64) DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_relation_datasource_user -- DROP TABLE IF EXISTS t_ds_relation_datasource_user; CREATE TABLE t_ds_relation_datasource_user ( id int NOT NULL , user_id int NOT NULL , datasource_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_relation_process_instance -- DROP TABLE IF EXISTS t_ds_relation_process_instance; CREATE TABLE t_ds_relation_process_instance ( id int NOT NULL , parent_process_instance_id int DEFAULT NULL , parent_task_instance_id int DEFAULT NULL , process_instance_id int DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_project_user -- DROP TABLE IF EXISTS t_ds_relation_project_user; CREATE TABLE t_ds_relation_project_user ( id int NOT NULL , user_id int NOT NULL , project_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; create index relation_project_user_id_index on t_ds_relation_project_user (user_id); -- -- Table 
structure for table t_ds_relation_resources_user -- DROP TABLE IF EXISTS t_ds_relation_resources_user; CREATE TABLE t_ds_relation_resources_user ( id int NOT NULL , user_id int NOT NULL , resources_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_relation_udfs_user -- DROP TABLE IF EXISTS t_ds_relation_udfs_user; CREATE TABLE t_ds_relation_udfs_user ( id int NOT NULL , user_id int NOT NULL , udf_id int DEFAULT NULL , perm int DEFAULT '1' , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; ; -- -- Table structure for table t_ds_resources -- DROP TABLE IF EXISTS t_ds_resources; CREATE TABLE t_ds_resources ( id int NOT NULL , alias varchar(64) DEFAULT NULL , file_name varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , user_id int DEFAULT NULL , type int DEFAULT NULL , size bigint DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , pid int, full_name varchar(64), is_directory int, PRIMARY KEY (id), CONSTRAINT t_ds_resources_un UNIQUE (full_name, type) ) ; -- -- Table structure for table t_ds_schedules -- DROP TABLE IF EXISTS t_ds_schedules; CREATE TABLE t_ds_schedules ( id int NOT NULL , process_definition_code bigint NOT NULL , start_time timestamp NOT NULL , end_time timestamp NOT NULL , timezone_id varchar(40) default NULL , crontab varchar(255) NOT NULL , failure_strategy int NOT NULL , user_id int NOT NULL , release_state int NOT NULL , warning_type int NOT NULL , warning_group_id int DEFAULT NULL , process_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT '-1', create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_session -- DROP TABLE IF EXISTS t_ds_session; CREATE TABLE t_ds_session ( id varchar(64) NOT NULL , user_id int DEFAULT NULL , ip varchar(45) DEFAULT NULL , last_login_time timestamp DEFAULT NULL , PRIMARY KEY (id) ); -- -- Table structure for table t_ds_task_instance -- DROP TABLE IF EXISTS t_ds_task_instance; CREATE TABLE t_ds_task_instance ( id int NOT NULL , name varchar(255) DEFAULT NULL , task_type varchar(50) DEFAULT NULL , task_code bigint NOT NULL, task_definition_version int DEFAULT NULL , process_instance_id int DEFAULT NULL , state int DEFAULT NULL , submit_time timestamp DEFAULT NULL , start_time timestamp DEFAULT NULL , end_time timestamp DEFAULT NULL , host varchar(135) DEFAULT NULL , execute_path varchar(200) DEFAULT NULL , log_path varchar(200) DEFAULT NULL , alert_flag int DEFAULT NULL , retry_times int DEFAULT '0' , pid int DEFAULT NULL , app_link text , task_params text , flag int DEFAULT '1' , retry_interval int DEFAULT NULL , max_retry_times int DEFAULT NULL , task_instance_priority int DEFAULT NULL , worker_group varchar(64), environment_code bigint DEFAULT '-1', environment_config text, executor_id int DEFAULT NULL , first_submit_time timestamp DEFAULT NULL , delay_time int DEFAULT '0' , var_pool text , PRIMARY KEY (id), CONSTRAINT foreign_key_instance_id FOREIGN KEY(process_instance_id) REFERENCES t_ds_process_instance(id) ON DELETE CASCADE ) ; -- -- Table structure for table t_ds_tenant -- DROP TABLE IF EXISTS t_ds_tenant; CREATE TABLE t_ds_tenant ( id int NOT NULL , tenant_code varchar(64) DEFAULT NULL , description varchar(255) DEFAULT NULL , queue_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , 
update_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_udfs -- DROP TABLE IF EXISTS t_ds_udfs; CREATE TABLE t_ds_udfs ( id int NOT NULL , user_id int NOT NULL , func_name varchar(100) NOT NULL , class_name varchar(255) NOT NULL , type int NOT NULL , arg_types varchar(255) DEFAULT NULL , database varchar(255) DEFAULT NULL , description varchar(255) DEFAULT NULL , resource_id int NOT NULL , resource_name varchar(255) NOT NULL , create_time timestamp NOT NULL , update_time timestamp NOT NULL , PRIMARY KEY (id) ) ; -- -- Table structure for table t_ds_user -- DROP TABLE IF EXISTS t_ds_user; CREATE TABLE t_ds_user ( id int NOT NULL , user_name varchar(64) DEFAULT NULL , user_password varchar(64) DEFAULT NULL , user_type int DEFAULT NULL , email varchar(64) DEFAULT NULL , phone varchar(11) DEFAULT NULL , tenant_id int DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , queue varchar(64) DEFAULT NULL , state int DEFAULT 1 , PRIMARY KEY (id) ); comment on column t_ds_user.state is 'state 0:disable 1:enable'; -- -- Table structure for table t_ds_version -- DROP TABLE IF EXISTS t_ds_version; CREATE TABLE t_ds_version ( id int NOT NULL , version varchar(200) NOT NULL, PRIMARY KEY (id) ) ; create index version_index on t_ds_version(version); -- -- Table structure for table t_ds_worker_group -- DROP TABLE IF EXISTS t_ds_worker_group; CREATE TABLE t_ds_worker_group ( id bigint NOT NULL , name varchar(255) NOT NULL , addr_list text DEFAULT NULL , create_time timestamp DEFAULT NULL , update_time timestamp DEFAULT NULL , PRIMARY KEY (id) , CONSTRAINT name_unique UNIQUE (name) ) ; -- -- Table structure for table t_ds_worker_server -- DROP TABLE IF EXISTS t_ds_worker_server; CREATE TABLE t_ds_worker_server ( id int NOT NULL , host varchar(45) DEFAULT NULL , port int DEFAULT NULL , zk_directory varchar(64) DEFAULT NULL , res_info varchar(255) DEFAULT NULL , create_time timestamp DEFAULT NULL , last_heartbeat_time timestamp DEFAULT NULL , PRIMARY KEY (id) ) ; DROP SEQUENCE IF EXISTS t_ds_access_token_id_sequence; CREATE SEQUENCE t_ds_access_token_id_sequence; ALTER TABLE t_ds_access_token ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_access_token_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alert_id_sequence; CREATE SEQUENCE t_ds_alert_id_sequence; ALTER TABLE t_ds_alert ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alert_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_alertgroup_id_sequence; CREATE SEQUENCE t_ds_alertgroup_id_sequence; ALTER TABLE t_ds_alertgroup ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_alertgroup_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_command_id_sequence; CREATE SEQUENCE t_ds_command_id_sequence; ALTER TABLE t_ds_command ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_command_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_datasource_id_sequence; CREATE SEQUENCE t_ds_datasource_id_sequence; ALTER TABLE t_ds_datasource ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_datasource_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_id_sequence; CREATE SEQUENCE t_ds_process_definition_id_sequence; ALTER TABLE t_ds_process_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_definition_log_id_sequence; CREATE SEQUENCE t_ds_process_definition_log_id_sequence; ALTER TABLE t_ds_process_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_id_sequence; CREATE SEQUENCE 
t_ds_task_definition_id_sequence; ALTER TABLE t_ds_task_definition ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_definition_log_id_sequence; CREATE SEQUENCE t_ds_task_definition_log_id_sequence; ALTER TABLE t_ds_task_definition_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_definition_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_id_sequence; ALTER TABLE t_ds_process_task_relation ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_task_relation_log_id_sequence; CREATE SEQUENCE t_ds_process_task_relation_log_id_sequence; ALTER TABLE t_ds_process_task_relation_log ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_task_relation_log_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_process_instance_id_sequence; CREATE SEQUENCE t_ds_process_instance_id_sequence; ALTER TABLE t_ds_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_project_id_sequence; CREATE SEQUENCE t_ds_project_id_sequence; ALTER TABLE t_ds_project ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_project_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_queue_id_sequence; CREATE SEQUENCE t_ds_queue_id_sequence; ALTER TABLE t_ds_queue ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_queue_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_datasource_user_id_sequence; CREATE SEQUENCE t_ds_relation_datasource_user_id_sequence; ALTER TABLE t_ds_relation_datasource_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_datasource_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_process_instance_id_sequence; CREATE SEQUENCE t_ds_relation_process_instance_id_sequence; ALTER TABLE t_ds_relation_process_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_process_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_project_user_id_sequence; CREATE SEQUENCE t_ds_relation_project_user_id_sequence; ALTER TABLE t_ds_relation_project_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_project_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_resources_user_id_sequence; CREATE SEQUENCE t_ds_relation_resources_user_id_sequence; ALTER TABLE t_ds_relation_resources_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_resources_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_relation_udfs_user_id_sequence; CREATE SEQUENCE t_ds_relation_udfs_user_id_sequence; ALTER TABLE t_ds_relation_udfs_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_relation_udfs_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_resources_id_sequence; CREATE SEQUENCE t_ds_resources_id_sequence; ALTER TABLE t_ds_resources ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_resources_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_schedules_id_sequence; CREATE SEQUENCE t_ds_schedules_id_sequence; ALTER TABLE t_ds_schedules ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_schedules_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_task_instance_id_sequence; CREATE SEQUENCE t_ds_task_instance_id_sequence; ALTER TABLE t_ds_task_instance ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_task_instance_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_tenant_id_sequence; CREATE SEQUENCE t_ds_tenant_id_sequence; ALTER TABLE t_ds_tenant ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_tenant_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_udfs_id_sequence; CREATE SEQUENCE t_ds_udfs_id_sequence; ALTER TABLE t_ds_udfs ALTER COLUMN id SET DEFAULT 
NEXTVAL('t_ds_udfs_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_user_id_sequence; CREATE SEQUENCE t_ds_user_id_sequence; ALTER TABLE t_ds_user ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_user_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_version_id_sequence; CREATE SEQUENCE t_ds_version_id_sequence; ALTER TABLE t_ds_version ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_version_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_group_id_sequence; CREATE SEQUENCE t_ds_worker_group_id_sequence; ALTER TABLE t_ds_worker_group ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_group_id_sequence'); DROP SEQUENCE IF EXISTS t_ds_worker_server_id_sequence; CREATE SEQUENCE t_ds_worker_server_id_sequence; ALTER TABLE t_ds_worker_server ALTER COLUMN id SET DEFAULT NEXTVAL('t_ds_worker_server_id_sequence'); -- Records of t_ds_user?user : admin , password : dolphinscheduler123 INSERT INTO t_ds_user(user_name, user_password, user_type, email, phone, tenant_id, state, create_time, update_time) VALUES ('admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', 1, '2018-03-27 15:48:50', '2018-10-24 17:40:22'); -- Records of t_ds_alertgroup, default admin warning group INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- Records of t_ds_queue,default queue name : default INSERT INTO t_ds_queue(queue_name, queue, create_time, update_time) VALUES ('default', 'default', '2018-11-29 10:22:33', '2018-11-29 10:22:33'); -- Records of t_ds_queue,default queue name : default INSERT INTO t_ds_version(version) VALUES ('1.4.0'); -- -- Table structure for table t_ds_plugin_define -- DROP TABLE IF EXISTS t_ds_plugin_define; CREATE TABLE t_ds_plugin_define ( id serial NOT NULL, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text NULL, create_time timestamp NULL, update_time timestamp NULL, CONSTRAINT t_ds_plugin_define_pk PRIMARY KEY (id), CONSTRAINT t_ds_plugin_define_un UNIQUE (plugin_name, plugin_type) ); -- -- Table structure for table t_ds_alert_plugin_instance -- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id serial NOT NULL, plugin_define_id int4 NOT NULL, plugin_instance_params text NULL, create_time timestamp NULL, update_time timestamp NULL, instance_name varchar(200) NULL, CONSTRAINT t_ds_alert_plugin_instance_pk PRIMARY KEY (id) ); -- -- Table structure for table t_ds_environment -- DROP TABLE IF EXISTS t_ds_environment; CREATE TABLE t_ds_environment ( id serial NOT NULL, code bigint NOT NULL, name varchar(100) DEFAULT NULL, config text DEFAULT NULL, description text, operator int DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id), CONSTRAINT environment_name_unique UNIQUE (name), CONSTRAINT environment_code_unique UNIQUE (code) ); -- -- Table structure for table t_ds_environment_worker_group_relation -- DROP TABLE IF EXISTS t_ds_environment_worker_group_relation; CREATE TABLE t_ds_environment_worker_group_relation ( id serial NOT NULL, environment_code bigint NOT NULL, worker_group varchar(255) NOT NULL, operator int DEFAULT NULL, create_time timestamp DEFAULT NULL, update_time timestamp DEFAULT NULL, PRIMARY KEY (id) , CONSTRAINT environment_worker_group_unique UNIQUE (environment_code,worker_group) );
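The schema above threads an `environment_code bigint DEFAULT '-1'` column through the command, error-command, schedule, process-instance, task-definition and task-instance tables, with `t_ds_environment.code` as the lookup key and `-1` as the "no environment" sentinel. A hypothetical query illustrating how a task instance resolves its environment under that convention (names match the DDL above; the query itself is not part of the source file):

```sql
-- Resolve the environment attached to one task instance.
-- environment_code = -1 means "no environment", so the LEFT JOIN yields NULLs.
SELECT ti.id,
       ti.name,
       ti.environment_code,
       e.name   AS environment_name,
       e.config AS environment_config
FROM t_ds_task_instance ti
LEFT JOIN t_ds_environment e ON e.code = ti.environment_code
WHERE ti.id = 1;
```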
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,137
[Bug][Master] Worker cannot be found if the worker starts earlier than the master.
Branch: dev. Phenomenon: 1. start the worker; 2. start the master; 3. start a process instance; 4. the master cannot find the worker. ![image](https://user-images.githubusercontent.com/29528966/132493045-689ed026-b1ae-474e-9479-b71ce4220671.png) After restarting the master, the problem disappeared.
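The symptom is a classic registry race: a watch only delivers events that occur after it is installed, so a worker whose ephemeral node already exists when the master subscribes never arrives as an ADD event. A hedged sketch of the usual mitigation follows (the `Registry` and `Event` types here are illustrative stand-ins, not the project's API): snapshot the current children first, then subscribe, then periodically reconcile so anything registered in the gap is still picked up.

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative only: a snapshot-plus-watch-plus-resync pattern for registry listeners.
// A bare subscribe() misses nodes created before the watch existed.
class WorkerNodeCache {
    private final Set<String> workers = ConcurrentHashMap.newKeySet();

    void start(Registry registry) {
        // 1. Snapshot what is already registered (catches workers started first).
        workers.addAll(registry.getChildren("/nodes/worker"));
        // 2. Watch for future ADD/REMOVE events.
        registry.subscribe("/nodes/worker", event -> {
            if (event.isAdd()) {
                workers.add(event.path());
            } else {
                workers.remove(event.path());
            }
        });
        // 3. A periodic resync (the role played by WorkerNodeInfoAndGroupDbSyncTask
        //    in the class below) closes the remaining window between steps 1 and 2.
    }

    // Hypothetical minimal registry API, standing in for RegistryClient.
    interface Registry {
        Set<String> getChildren(String path);
        void subscribe(String path, java.util.function.Consumer<Event> listener);
    }

    interface Event {
        boolean isAdd();
        String path();
    }
}
```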
https://github.com/apache/dolphinscheduler/issues/6137
https://github.com/apache/dolphinscheduler/pull/6232
5fead427f227bd6261b677fff600e4f913462c5e
fa0b86ac824fe88acecb64178de87529ad9f2627
"2021-09-08T10:25:33Z"
java
"2021-09-16T07:08:00Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/registry/ServerNodeManager.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.registry; import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_MASTERS; import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.NodeType; import org.apache.dolphinscheduler.common.model.Server; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.dao.AlertDao; import org.apache.dolphinscheduler.dao.entity.WorkerGroup; import org.apache.dolphinscheduler.dao.mapper.WorkerGroupMapper; import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory; import org.apache.dolphinscheduler.service.queue.MasterPriorityQueue; import org.apache.dolphinscheduler.service.registry.RegistryClient; import org.apache.dolphinscheduler.spi.register.DataChangeEvent; import org.apache.dolphinscheduler.spi.register.SubscribeListener; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import javax.annotation.PreDestroy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.InitializingBean; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; /** * server node manager */ @Service public class ServerNodeManager implements InitializingBean { private final Logger logger = LoggerFactory.getLogger(ServerNodeManager.class); /** * master lock */ private final Lock masterLock = new ReentrantLock(); /** * worker group lock */ private final Lock workerGroupLock = new ReentrantLock(); /** * worker node info lock */ private final Lock workerNodeInfoLock = new ReentrantLock(); /** * worker group nodes */ private final ConcurrentHashMap<String, Set<String>> workerGroupNodes = new ConcurrentHashMap<>(); /** * master nodes */ private final Set<String> masterNodes = new HashSet<>(); /** * worker node info */ private final Map<String, String> workerNodeInfo = new HashMap<>(); /** * executor service */ private ScheduledExecutorService executorService; /** * zk client */ private RegistryClient registryClient = RegistryClient.getInstance(); /** * worker group mapper */ @Autowired private WorkerGroupMapper workerGroupMapper; 
private MasterPriorityQueue masterPriorityQueue = new MasterPriorityQueue(); /** * alert dao */ @Autowired private AlertDao alertDao; public static volatile List<Integer> SLOT_LIST = new ArrayList<>(); public static volatile Integer MASTER_SIZE = 0; public static Integer getSlot() { if (SLOT_LIST.size() > 0) { return SLOT_LIST.get(0); } return 0; } /** * init listener * * @throws Exception if error throws Exception */ @Override public void afterPropertiesSet() throws Exception { /** * load nodes from zookeeper */ load(); /** * init executor service */ executorService = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("ServerNodeManagerExecutor")); executorService.scheduleWithFixedDelay(new WorkerNodeInfoAndGroupDbSyncTask(), 0, 10, TimeUnit.SECONDS); /** * init MasterNodeListener listener */ registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_MASTERS, new MasterDataListener()); /** * init WorkerNodeListener listener */ registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_WORKERS, new MasterDataListener()); } /** * load nodes from zookeeper */ public void load() { /** * master nodes from zookeeper */ updateMasterNodes(); /** * worker group nodes from zookeeper */ Set<String> workerGroups = registryClient.getWorkerGroupDirectly(); for (String workerGroup : workerGroups) { syncWorkerGroupNodes(workerGroup, registryClient.getWorkerGroupNodesDirectly(workerGroup)); } } /** * worker node info and worker group db sync task */ class WorkerNodeInfoAndGroupDbSyncTask implements Runnable { @Override public void run() { // sync worker node info Map<String, String> newWorkerNodeInfo = registryClient.getServerMaps(NodeType.WORKER, true); syncWorkerNodeInfo(newWorkerNodeInfo); // sync worker group nodes from database List<WorkerGroup> workerGroupList = workerGroupMapper.queryAllWorkerGroup(); if (CollectionUtils.isNotEmpty(workerGroupList)) { for (WorkerGroup wg : workerGroupList) { String workerGroup = wg.getName(); Set<String> nodes = new HashSet<>(); String[] addrs = wg.getAddrList().split(Constants.COMMA); for (String addr : addrs) { if (newWorkerNodeInfo.containsKey(addr)) { nodes.add(addr); } } if (!nodes.isEmpty()) { syncWorkerGroupNodes(workerGroup, nodes); } } } } } /** * worker group node listener */ class WorkerGroupNodeListener implements SubscribeListener { @Override public void notify(String path, DataChangeEvent dataChangeEvent) { if (registryClient.isWorkerPath(path)) { try { if (dataChangeEvent == DataChangeEvent.ADD) { logger.info("worker group node : {} added.", path); String group = parseGroup(path); Set<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group); logger.info("currentNodes : {}", currentNodes); syncWorkerGroupNodes(group, currentNodes); } else if (dataChangeEvent == DataChangeEvent.REMOVE) { logger.info("worker group node : {} down.", path); String group = parseGroup(path); Set<String> currentNodes = registryClient.getWorkerGroupNodesDirectly(group); syncWorkerGroupNodes(group, currentNodes); alertDao.sendServerStopedAlert(1, path, "WORKER"); } } catch (IllegalArgumentException ex) { logger.warn(ex.getMessage()); } catch (Exception ex) { logger.error("WorkerGroupListener capture data change and get data failed", ex); } } } private String parseGroup(String path) { String[] parts = path.split("/"); if (parts.length < 6) { throw new IllegalArgumentException(String.format("worker group path : %s is not valid, ignore", path)); } return parts[parts.length - 2]; } } /** * master node listener */ class MasterDataListener implements SubscribeListener 
{ @Override public void notify(String path, DataChangeEvent dataChangeEvent) { if (registryClient.isMasterPath(path)) { try { if (dataChangeEvent.equals(DataChangeEvent.ADD)) { logger.info("master node : {} added.", path); updateMasterNodes(); } if (dataChangeEvent.equals(DataChangeEvent.REMOVE)) { logger.info("master node : {} down.", path); updateMasterNodes(); alertDao.sendServerStopedAlert(1, path, "MASTER"); } } catch (Exception ex) { logger.error("MasterNodeListener capture data change and get data failed.", ex); } } } } private void updateMasterNodes() { SLOT_LIST.clear(); this.masterNodes.clear(); String nodeLock = registryClient.getMasterLockPath(); try { registryClient.getLock(nodeLock); Set<String> currentNodes = registryClient.getMasterNodesDirectly(); List<Server> masterNodes = registryClient.getServerList(NodeType.MASTER); syncMasterNodes(currentNodes, masterNodes); } catch (Exception e) { logger.error("update master nodes error", e); } finally { registryClient.releaseLock(nodeLock); } } /** * get master nodes * * @return master nodes */ public Set<String> getMasterNodes() { masterLock.lock(); try { return Collections.unmodifiableSet(masterNodes); } finally { masterLock.unlock(); } } /** * sync master nodes * * @param nodes master nodes * @param masterNodes */ private void syncMasterNodes(Set<String> nodes, List<Server> masterNodes) { masterLock.lock(); try { this.masterNodes.addAll(nodes); this.masterPriorityQueue.clear(); this.masterPriorityQueue.putList(masterNodes); int index = masterPriorityQueue.getIndex(NetUtils.getHost()); if (index >= 0) { MASTER_SIZE = nodes.size(); SLOT_LIST.add(masterPriorityQueue.getIndex(NetUtils.getHost())); } logger.info("update master nodes, master size: {}, slot: {}", MASTER_SIZE, SLOT_LIST.toString() ); } finally { masterLock.unlock(); } } /** * sync worker group nodes * * @param workerGroup worker group * @param nodes worker nodes */ private void syncWorkerGroupNodes(String workerGroup, Set<String> nodes) { workerGroupLock.lock(); try { workerGroup = workerGroup.toLowerCase(); Set<String> workerNodes = workerGroupNodes.getOrDefault(workerGroup, new HashSet<>()); workerNodes.clear(); workerNodes.addAll(nodes); workerGroupNodes.put(workerGroup, workerNodes); } finally { workerGroupLock.unlock(); } } public Map<String, Set<String>> getWorkerGroupNodes() { return Collections.unmodifiableMap(workerGroupNodes); } /** * get worker group nodes * * @param workerGroup workerGroup * @return worker nodes */ public Set<String> getWorkerGroupNodes(String workerGroup) { workerGroupLock.lock(); try { if (StringUtils.isEmpty(workerGroup)) { workerGroup = Constants.DEFAULT_WORKER_GROUP; } workerGroup = workerGroup.toLowerCase(); Set<String> nodes = workerGroupNodes.get(workerGroup); if (CollectionUtils.isNotEmpty(nodes)) { return Collections.unmodifiableSet(nodes); } return nodes; } finally { workerGroupLock.unlock(); } } /** * get worker node info * * @return worker node info */ public Map<String, String> getWorkerNodeInfo() { return Collections.unmodifiableMap(workerNodeInfo); } /** * get worker node info * * @param workerNode worker node * @return worker node info */ public String getWorkerNodeInfo(String workerNode) { workerNodeInfoLock.lock(); try { return workerNodeInfo.getOrDefault(workerNode, null); } finally { workerNodeInfoLock.unlock(); } } /** * sync worker node info * * @param newWorkerNodeInfo new worker node info */ private void syncWorkerNodeInfo(Map<String, String> newWorkerNodeInfo) { workerNodeInfoLock.lock(); try { 
workerNodeInfo.clear(); workerNodeInfo.putAll(newWorkerNodeInfo); } finally { workerNodeInfoLock.unlock(); } } /** * destroy */ @PreDestroy public void destroy() { executorService.shutdownNow(); } }
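Note that in this pre-fix snapshot, `afterPropertiesSet()` subscribes the workers path with a `MasterDataListener` (`registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_WORKERS, new MasterDataListener())`) even though a `WorkerGroupNodeListener` is defined in the same class. Since `MasterDataListener.notify()` only acts when `registryClient.isMasterPath(path)` is true, worker ADD/REMOVE events on that subscription are silently ignored, which matches the reported symptom. A sketch of the presumably intended wiring (the actual change is in the linked PR and may differ):

```java
// Presumed intent: route workers-path events to the worker listener,
// so a worker that registered before the master started is still noticed.
registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_MASTERS, new MasterDataListener());
registryClient.subscribe(REGISTRY_DOLPHINSCHEDULER_WORKERS, new WorkerGroupNodeListener());
```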
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,137
[Bug][Master] Worker cannot be found if the worker starts earlier than the master.
Branch: dev. Phenomenon: 1. start the worker; 2. start the master; 3. start a process instance; 4. the master cannot find the worker. ![image](https://user-images.githubusercontent.com/29528966/132493045-689ed026-b1ae-474e-9479-b71ce4220671.png) After restarting the master, the problem disappeared.
https://github.com/apache/dolphinscheduler/issues/6137
https://github.com/apache/dolphinscheduler/pull/6232
5fead427f227bd6261b677fff600e4f913462c5e
fa0b86ac824fe88acecb64178de87529ad9f2627
"2021-09-08T10:25:33Z"
java
"2021-09-16T07:08:00Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/registry/WorkerRegistryClient.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.registry; import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP; import static org.apache.dolphinscheduler.common.Constants.REGISTRY_DOLPHINSCHEDULER_WORKERS; import static org.apache.dolphinscheduler.common.Constants.SINGLE_SLASH; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.IStoppable; import org.apache.dolphinscheduler.common.enums.NodeType; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.remote.utils.NamedThreadFactory; import org.apache.dolphinscheduler.server.registry.HeartBeatTask; import org.apache.dolphinscheduler.server.worker.config.WorkerConfig; import org.apache.dolphinscheduler.service.registry.RegistryClient; import org.apache.commons.lang.StringUtils; import java.util.Date; import java.util.Set; import java.util.StringJoiner; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import javax.annotation.PostConstruct; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import com.google.common.collect.Sets; /** * worker registry */ @Service public class WorkerRegistryClient { private final Logger logger = LoggerFactory.getLogger(WorkerRegistryClient.class); /** * worker config */ @Autowired private WorkerConfig workerConfig; /** * heartbeat executor */ private ScheduledExecutorService heartBeatExecutor; private RegistryClient registryClient; /** * worker start time */ private String startTime; private Set<String> workerGroups; @PostConstruct public void initWorkRegistry() { this.workerGroups = workerConfig.getWorkerGroups(); this.startTime = DateUtils.dateToString(new Date()); this.registryClient = RegistryClient.getInstance(); this.heartBeatExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("HeartBeatExecutor")); } /** * registry */ public void registry() { String address = NetUtils.getAddr(workerConfig.getListenPort()); Set<String> workerZkPaths = getWorkerZkPaths(); int workerHeartbeatInterval = workerConfig.getWorkerHeartbeatInterval(); for (String workerZKPath : workerZkPaths) { registryClient.persistEphemeral(workerZKPath, ""); logger.info("worker node : {} registry to ZK {} successfully", address, workerZKPath); } HeartBeatTask heartBeatTask = new HeartBeatTask(startTime, workerConfig.getWorkerMaxCpuloadAvg(), workerConfig.getWorkerReservedMemory(), workerConfig.getHostWeight(), workerZkPaths, Constants.WORKER_TYPE, registryClient); 
this.heartBeatExecutor.scheduleAtFixedRate(heartBeatTask, workerHeartbeatInterval, workerHeartbeatInterval, TimeUnit.SECONDS); logger.info("worker node : {} heartbeat interval {} s", address, workerHeartbeatInterval); } /** * remove registry info */ public void unRegistry() { String address = getLocalAddress(); Set<String> workerZkPaths = getWorkerZkPaths(); for (String workerZkPath : workerZkPaths) { registryClient.remove(workerZkPath); logger.info("worker node : {} unRegistry from ZK {}.", address, workerZkPath); } this.heartBeatExecutor.shutdownNow(); logger.info("heartbeat executor shutdown"); registryClient.close(); } /** * get worker path */ public Set<String> getWorkerZkPaths() { Set<String> workerPaths = Sets.newHashSet(); String address = getLocalAddress(); String workerZkPathPrefix = REGISTRY_DOLPHINSCHEDULER_WORKERS; for (String workGroup : this.workerGroups) { StringJoiner workerPathJoiner = new StringJoiner(SINGLE_SLASH); workerPathJoiner.add(workerZkPathPrefix); if (StringUtils.isEmpty(workGroup)) { workGroup = DEFAULT_WORKER_GROUP; } // trim and lower case is need workerPathJoiner.add(workGroup.trim().toLowerCase()); workerPathJoiner.add(address); workerPaths.add(workerPathJoiner.toString()); } return workerPaths; } public void handleDeadServer(Set<String> nodeSet, NodeType nodeType, String opType) throws Exception { registryClient.handleDeadServer(nodeSet, nodeType, opType); } /** * get local address */ private String getLocalAddress() { return NetUtils.getAddr(workerConfig.getListenPort()); } public void setRegistryStoppable(IStoppable stoppable) { registryClient.setStoppable(stoppable); } }
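For reference, `getWorkerZkPaths()` above builds one registry path per configured worker group by joining the prefix constant, the trimmed lower-cased group name, and the worker address. A tiny standalone sketch of the same construction, assuming `REGISTRY_DOLPHINSCHEDULER_WORKERS` resolves to `/nodes/worker` (the real value lives in `Constants`):

```java
import java.util.StringJoiner;

public class WorkerPathDemo {
    public static void main(String[] args) {
        String prefix = "/nodes/worker";       // assumed value of REGISTRY_DOLPHINSCHEDULER_WORKERS
        String group = " Default ";            // raw group name from worker config
        String address = "192.168.0.10:1234";  // NetUtils.getAddr(listenPort)

        StringJoiner joiner = new StringJoiner("/");
        joiner.add(prefix);
        joiner.add(group.trim().toLowerCase()); // trim + lower case, as in getWorkerZkPaths()
        joiner.add(address);

        // Prints: /nodes/worker/default/192.168.0.10:1234
        System.out.println(joiner.toString());
    }
}
```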
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,214
[Bug] [Server] StandaloneServer can't save workflow correctly
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When using StandaloneServer, after creating a workflow in a project and clicking the save button, the web page shows a success message, but no workflow appears in the list. ### What you expected to happen The workflow should be shown after we create and save it. The log is: ```text org.mybatis.spring.MyBatisSystemException: nested exception is org.apache.ibatis.exceptions.PersistenceException: ### Error querying database. Cause: com.baomidou.mybatisplus.core.exceptions.MybatisPlusException: Error: Method queryTotal execution error of sql : SELECT COUNT(1) FROM t_ds_process_definition td LEFT JOIN (SELECT process_definition_code, release_state AS schedule_release_state FROM t_ds_schedules GROUP BY process_definition_code, release_state) sc ON sc.process_definition_code = td.code LEFT JOIN t_ds_user tu ON td.user_id = tu.id WHERE td.project_code = ? .... Caused by: org.h2.jdbc.JdbcSQLSyntaxErrorException: Column "process_definition_code" not found; SQL statement: CREATE FORCE VIEW ( SELECT "process_definition_code", "release_state" AS "schedule_release_state" FROM "public"."t_ds_schedules" GROUP BY "process_definition_code", "release_state" ) AS SELECT "process_definition_code", "release_state" AS "schedule_release_state" FROM "public"."t_ds_schedules" GROUP BY "process_definition_code", "release_state" [42122-200] ``` ### How to reproduce 1. Start StandaloneServer. 2. Log in to DolphinScheduler and create a project. 3. Create a workflow in the project we just created. 4. Add some nodes to the workflow. 5. Click the save button to save the workflow. 6. The web page shows a success message, but an exception appears in the IDE. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
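The stack trace shows H2 rejecting a quoted lower-case "process_definition_code" against a table created with unquoted (hence upper-case-folded) identifiers; in H2, quoted and unquoted names only match when the case lines up. A minimal sketch of that folding behavior and one common mitigation (the `DATABASE_TO_LOWER` and `CASE_INSENSITIVE_IDENTIFIERS` URL options are real H2 settings; whether the linked PR uses them or adjusts the schema instead is not shown here):

```sql
-- In a default H2 session, unquoted identifiers fold to upper case:
CREATE TABLE t_demo (process_definition_code BIGINT);   -- stored as PROCESS_DEFINITION_CODE
SELECT "process_definition_code" FROM t_demo;           -- fails: Column not found
SELECT process_definition_code FROM t_demo;             -- works: also folds to upper case

-- One mitigation: fold everything to lower case at the JDBC level, e.g.
-- jdbc:h2:mem:dolphinscheduler;DATABASE_TO_LOWER=TRUE;CASE_INSENSITIVE_IDENTIFIERS=TRUE
```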
https://github.com/apache/dolphinscheduler/issues/6214
https://github.com/apache/dolphinscheduler/pull/6215
6aafa81affca5fcab05c94cbcc94738c5f271e6b
a1e447d39969362b5b380b6b802ce47d961bde51
"2021-09-15T02:39:04Z"
java
"2021-09-16T13:18:03Z"
sql/dolphinscheduler_h2.sql
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ SET FOREIGN_KEY_CHECKS=0; -- ---------------------------- -- Table structure for QRTZ_JOB_DETAILS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_JOB_DETAILS; CREATE TABLE QRTZ_JOB_DETAILS ( SCHED_NAME varchar(120) NOT NULL, JOB_NAME varchar(200) NOT NULL, JOB_GROUP varchar(200) NOT NULL, DESCRIPTION varchar(250) DEFAULT NULL, JOB_CLASS_NAME varchar(250) NOT NULL, IS_DURABLE varchar(1) NOT NULL, IS_NONCONCURRENT varchar(1) NOT NULL, IS_UPDATE_DATA varchar(1) NOT NULL, REQUESTS_RECOVERY varchar(1) NOT NULL, JOB_DATA blob, PRIMARY KEY (SCHED_NAME, JOB_NAME, JOB_GROUP) ); -- ---------------------------- -- Table structure for QRTZ_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_TRIGGERS; CREATE TABLE QRTZ_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, JOB_NAME varchar(200) NOT NULL, JOB_GROUP varchar(200) NOT NULL, DESCRIPTION varchar(250) DEFAULT NULL, NEXT_FIRE_TIME bigint(13) DEFAULT NULL, PREV_FIRE_TIME bigint(13) DEFAULT NULL, PRIORITY int(11) DEFAULT NULL, TRIGGER_STATE varchar(16) NOT NULL, TRIGGER_TYPE varchar(8) NOT NULL, START_TIME bigint(13) NOT NULL, END_TIME bigint(13) DEFAULT NULL, CALENDAR_NAME varchar(200) DEFAULT NULL, MISFIRE_INSTR smallint(2) DEFAULT NULL, JOB_DATA blob, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), CONSTRAINT QRTZ_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, JOB_NAME, JOB_GROUP) REFERENCES QRTZ_JOB_DETAILS (SCHED_NAME, JOB_NAME, JOB_GROUP) ); -- ---------------------------- -- Table structure for QRTZ_BLOB_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_BLOB_TRIGGERS; CREATE TABLE QRTZ_BLOB_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, BLOB_DATA blob, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_BLOB_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CALENDARS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_CALENDARS; CREATE TABLE QRTZ_CALENDARS ( SCHED_NAME varchar(120) NOT NULL, CALENDAR_NAME varchar(200) NOT NULL, CALENDAR blob NOT NULL, PRIMARY KEY (SCHED_NAME, CALENDAR_NAME) ); -- ---------------------------- -- Records of QRTZ_CALENDARS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_CRON_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_CRON_TRIGGERS; CREATE TABLE QRTZ_CRON_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, 
CRON_EXPRESSION varchar(120) NOT NULL, TIME_ZONE_ID varchar(80) DEFAULT NULL, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), CONSTRAINT QRTZ_CRON_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_CRON_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_FIRED_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_FIRED_TRIGGERS; CREATE TABLE QRTZ_FIRED_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, ENTRY_ID varchar(200) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, INSTANCE_NAME varchar(200) NOT NULL, FIRED_TIME bigint(13) NOT NULL, SCHED_TIME bigint(13) NOT NULL, PRIORITY int(11) NOT NULL, STATE varchar(16) NOT NULL, JOB_NAME varchar(200) DEFAULT NULL, JOB_GROUP varchar(200) DEFAULT NULL, IS_NONCONCURRENT varchar(1) DEFAULT NULL, REQUESTS_RECOVERY varchar(1) DEFAULT NULL, PRIMARY KEY (SCHED_NAME, ENTRY_ID) ); -- ---------------------------- -- Records of QRTZ_FIRED_TRIGGERS -- ---------------------------- -- ---------------------------- -- Records of QRTZ_JOB_DETAILS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_LOCKS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_LOCKS; CREATE TABLE QRTZ_LOCKS ( SCHED_NAME varchar(120) NOT NULL, LOCK_NAME varchar(40) NOT NULL, PRIMARY KEY (SCHED_NAME, LOCK_NAME) ); -- ---------------------------- -- Records of QRTZ_LOCKS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_PAUSED_TRIGGER_GRPS; CREATE TABLE QRTZ_PAUSED_TRIGGER_GRPS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, PRIMARY KEY (SCHED_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_PAUSED_TRIGGER_GRPS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SCHEDULER_STATE -- ---------------------------- DROP TABLE IF EXISTS QRTZ_SCHEDULER_STATE; CREATE TABLE QRTZ_SCHEDULER_STATE ( SCHED_NAME varchar(120) NOT NULL, INSTANCE_NAME varchar(200) NOT NULL, LAST_CHECKIN_TIME bigint(13) NOT NULL, CHECKIN_INTERVAL bigint(13) NOT NULL, PRIMARY KEY (SCHED_NAME, INSTANCE_NAME) ); -- ---------------------------- -- Records of QRTZ_SCHEDULER_STATE -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPLE_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_SIMPLE_TRIGGERS; CREATE TABLE QRTZ_SIMPLE_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, REPEAT_COUNT bigint(7) NOT NULL, REPEAT_INTERVAL bigint(12) NOT NULL, TIMES_TRIGGERED bigint(10) NOT NULL, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), CONSTRAINT QRTZ_SIMPLE_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_SIMPLE_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for QRTZ_SIMPROP_TRIGGERS -- ---------------------------- DROP TABLE IF EXISTS QRTZ_SIMPROP_TRIGGERS; CREATE TABLE QRTZ_SIMPROP_TRIGGERS ( SCHED_NAME varchar(120) NOT NULL, TRIGGER_NAME varchar(200) NOT NULL, TRIGGER_GROUP varchar(200) NOT NULL, STR_PROP_1 varchar(512) DEFAULT 
NULL, STR_PROP_2 varchar(512) DEFAULT NULL, STR_PROP_3 varchar(512) DEFAULT NULL, INT_PROP_1 int(11) DEFAULT NULL, INT_PROP_2 int(11) DEFAULT NULL, LONG_PROP_1 bigint(20) DEFAULT NULL, LONG_PROP_2 bigint(20) DEFAULT NULL, DEC_PROP_1 decimal(13, 4) DEFAULT NULL, DEC_PROP_2 decimal(13, 4) DEFAULT NULL, BOOL_PROP_1 varchar(1) DEFAULT NULL, BOOL_PROP_2 varchar(1) DEFAULT NULL, PRIMARY KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP), CONSTRAINT QRTZ_SIMPROP_TRIGGERS_ibfk_1 FOREIGN KEY (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) REFERENCES QRTZ_TRIGGERS (SCHED_NAME, TRIGGER_NAME, TRIGGER_GROUP) ); -- ---------------------------- -- Records of QRTZ_SIMPROP_TRIGGERS -- ---------------------------- -- ---------------------------- -- Records of QRTZ_TRIGGERS -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_access_token -- ---------------------------- DROP TABLE IF EXISTS t_ds_access_token; CREATE TABLE t_ds_access_token ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) DEFAULT NULL, token varchar(64) DEFAULT NULL, expire_time datetime DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_access_token -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alert -- ---------------------------- DROP TABLE IF EXISTS t_ds_alert; CREATE TABLE t_ds_alert ( id int(11) NOT NULL AUTO_INCREMENT, title varchar(64) DEFAULT NULL, content text, alert_status tinyint(4) DEFAULT '0', log text, alertgroup_id int(11) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_alert -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_alertgroup -- ---------------------------- DROP TABLE IF EXISTS t_ds_alertgroup; CREATE TABLE t_ds_alertgroup ( id int(11) NOT NULL AUTO_INCREMENT, alert_instance_ids varchar(255) DEFAULT NULL, create_user_id int(11) DEFAULT NULL, group_name varchar(255) DEFAULT NULL, description varchar(255) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY t_ds_alertgroup_name_un (group_name) ); -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_command -- ---------------------------- DROP TABLE IF EXISTS t_ds_command; CREATE TABLE t_ds_command ( id int(11) NOT NULL AUTO_INCREMENT, command_type tinyint(4) DEFAULT NULL, process_definition_id int(11) DEFAULT NULL, command_param text, task_depend_type tinyint(4) DEFAULT NULL, failure_strategy tinyint(4) DEFAULT '0', warning_type tinyint(4) DEFAULT '0', warning_group_id int(11) DEFAULT NULL, schedule_time datetime DEFAULT NULL, start_time datetime DEFAULT NULL, executor_id int(11) DEFAULT NULL, update_time datetime DEFAULT NULL, process_instance_priority int(11) DEFAULT NULL, worker_group varchar(64), environment_code bigint(20) DEFAULT '-1', PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_datasource -- ---------------------------- DROP TABLE IF EXISTS t_ds_datasource; CREATE TABLE t_ds_datasource ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(64) NOT NULL, note varchar(255) DEFAULT NULL, type tinyint(4) NOT NULL, user_id int(11) NOT NULL, connection_params text 
NOT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY t_ds_datasource_name_un (name, type) ); -- ---------------------------- -- Records of t_ds_datasource -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_error_command -- ---------------------------- DROP TABLE IF EXISTS t_ds_error_command; CREATE TABLE t_ds_error_command ( id int(11) NOT NULL, command_type tinyint(4) DEFAULT NULL, executor_id int(11) DEFAULT NULL, process_definition_id int(11) DEFAULT NULL, command_param text, task_depend_type tinyint(4) DEFAULT NULL, failure_strategy tinyint(4) DEFAULT '0', warning_type tinyint(4) DEFAULT '0', warning_group_id int(11) DEFAULT NULL, schedule_time datetime DEFAULT NULL, start_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, process_instance_priority int(11) DEFAULT NULL, worker_group varchar(64), environment_code bigint(20) DEFAULT '-1', message text, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_error_command -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_definition; CREATE TABLE t_ds_process_definition ( id int(11) NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(255) DEFAULT NULL, version int(11) DEFAULT NULL, description text, project_code bigint(20) NOT NULL, release_state tinyint(4) DEFAULT NULL, user_id int(11) DEFAULT NULL, global_params text, flag tinyint(4) DEFAULT NULL, locations text, warning_group_id int(11) DEFAULT NULL, timeout int(11) DEFAULT '0', tenant_id int(11) NOT NULL DEFAULT '-1', create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY process_unique (name,project_code) USING BTREE, UNIQUE KEY code_unique (code) ); -- ---------------------------- -- Records of t_ds_process_definition -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_process_definition_log -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_definition_log; CREATE TABLE t_ds_process_definition_log ( id int(11) NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(200) DEFAULT NULL, version int(11) DEFAULT NULL, description text, project_code bigint(20) NOT NULL, release_state tinyint(4) DEFAULT NULL, user_id int(11) DEFAULT NULL, global_params text, flag tinyint(4) DEFAULT NULL, locations text, warning_group_id int(11) DEFAULT NULL, timeout int(11) DEFAULT '0', tenant_id int(11) NOT NULL DEFAULT '-1', operator int(11) DEFAULT NULL, operate_time datetime DEFAULT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_task_definition -- ---------------------------- DROP TABLE IF EXISTS t_ds_task_definition; CREATE TABLE t_ds_task_definition ( id int(11) NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(200) DEFAULT NULL, version int(11) DEFAULT NULL, description text, project_code bigint(20) NOT NULL, user_id int(11) DEFAULT NULL, task_type varchar(50) NOT NULL, task_params longtext, flag tinyint(2) DEFAULT NULL, task_priority tinyint(4) DEFAULT NULL, worker_group varchar(200) DEFAULT NULL, environment_code bigint(20) DEFAULT '-1', fail_retry_times int(11) DEFAULT NULL, fail_retry_interval int(11) DEFAULT NULL, timeout_flag tinyint(2) DEFAULT '0', timeout_notify_strategy tinyint(4) DEFAULT NULL, timeout int(11) DEFAULT 
'0', delay_time int(11) DEFAULT '0', resource_ids varchar(255) DEFAULT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id, code), UNIQUE KEY task_unique (name,project_code) USING BTREE ); -- ---------------------------- -- Table structure for t_ds_task_definition_log -- ---------------------------- DROP TABLE IF EXISTS t_ds_task_definition_log; CREATE TABLE t_ds_task_definition_log ( id int(11) NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(200) DEFAULT NULL, version int(11) DEFAULT NULL, description text, project_code bigint(20) NOT NULL, user_id int(11) DEFAULT NULL, task_type varchar(50) NOT NULL, task_params text, flag tinyint(2) DEFAULT NULL, task_priority tinyint(4) DEFAULT NULL, worker_group varchar(200) DEFAULT NULL, environment_code bigint(20) DEFAULT '-1', fail_retry_times int(11) DEFAULT NULL, fail_retry_interval int(11) DEFAULT NULL, timeout_flag tinyint(2) DEFAULT '0', timeout_notify_strategy tinyint(4) DEFAULT NULL, timeout int(11) DEFAULT '0', delay_time int(11) DEFAULT '0', resource_ids varchar(255) DEFAULT NULL, operator int(11) DEFAULT NULL, operate_time datetime DEFAULT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_process_task_relation -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_task_relation; CREATE TABLE t_ds_process_task_relation ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(200) DEFAULT NULL, process_definition_version int(11) DEFAULT NULL, project_code bigint(20) NOT NULL, process_definition_code bigint(20) NOT NULL, pre_task_code bigint(20) NOT NULL, pre_task_version int(11) NOT NULL, post_task_code bigint(20) NOT NULL, post_task_version int(11) NOT NULL, condition_type tinyint(2) DEFAULT NULL, condition_params text, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_process_task_relation_log -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_task_relation_log; CREATE TABLE t_ds_process_task_relation_log ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(200) DEFAULT NULL, process_definition_version int(11) DEFAULT NULL, project_code bigint(20) NOT NULL, process_definition_code bigint(20) NOT NULL, pre_task_code bigint(20) NOT NULL, pre_task_version int(11) NOT NULL, post_task_code bigint(20) NOT NULL, post_task_version int(11) NOT NULL, condition_type tinyint(2) DEFAULT NULL, condition_params text, operator int(11) DEFAULT NULL, operate_time datetime DEFAULT NULL, create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_process_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_process_instance; CREATE TABLE t_ds_process_instance ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(255) DEFAULT NULL, process_definition_version int(11) DEFAULT NULL, process_definition_code bigint(20) not NULL, state tinyint(4) DEFAULT NULL, recovery tinyint(4) DEFAULT NULL, start_time datetime DEFAULT NULL, end_time datetime DEFAULT NULL, run_times int(11) DEFAULT NULL, host varchar(135) DEFAULT NULL, command_type tinyint(4) DEFAULT NULL, command_param text, task_depend_type tinyint(4) DEFAULT NULL, max_try_times tinyint(4) DEFAULT '0', failure_strategy tinyint(4) DEFAULT '0', warning_type tinyint(4) DEFAULT '0', warning_group_id int(11) DEFAULT NULL, schedule_time datetime DEFAULT NULL, 
command_start_time datetime DEFAULT NULL, global_params text, flag tinyint(4) DEFAULT '1', update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, is_sub_process int(11) DEFAULT '0', executor_id int(11) NOT NULL, history_cmd text, process_instance_priority int(11) DEFAULT NULL, worker_group varchar(64) DEFAULT NULL, environment_code bigint(20) DEFAULT '-1', timeout int(11) DEFAULT '0', tenant_id int(11) NOT NULL DEFAULT '-1', var_pool longtext, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_project -- ---------------------------- DROP TABLE IF EXISTS t_ds_project; CREATE TABLE t_ds_project ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(100) DEFAULT NULL, code bigint(20) NOT NULL, description varchar(200) DEFAULT NULL, user_id int(11) DEFAULT NULL, flag tinyint(4) DEFAULT '1', create_time datetime NOT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_project -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_queue -- ---------------------------- DROP TABLE IF EXISTS t_ds_queue; CREATE TABLE t_ds_queue ( id int(11) NOT NULL AUTO_INCREMENT, queue_name varchar(64) DEFAULT NULL, queue varchar(64) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_queue -- ---------------------------- INSERT INTO t_ds_queue VALUES ('1', 'default', 'default', null, null); -- ---------------------------- -- Table structure for t_ds_relation_datasource_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_datasource_user; CREATE TABLE t_ds_relation_datasource_user ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, datasource_id int(11) DEFAULT NULL, perm int(11) DEFAULT '1', create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_relation_datasource_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_process_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_process_instance; CREATE TABLE t_ds_relation_process_instance ( id int(11) NOT NULL AUTO_INCREMENT, parent_process_instance_id int(11) DEFAULT NULL, parent_task_instance_id int(11) DEFAULT NULL, process_instance_id int(11) DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_relation_process_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_project_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_project_user; CREATE TABLE t_ds_relation_project_user ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, project_id int(11) DEFAULT NULL, perm int(11) DEFAULT '1', create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_relation_project_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_resources_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_resources_user; CREATE TABLE t_ds_relation_resources_user ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, resources_id int(11) DEFAULT NULL, perm int(11) DEFAULT '1', 
create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_relation_resources_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_relation_udfs_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_relation_udfs_user; CREATE TABLE t_ds_relation_udfs_user ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, udf_id int(11) DEFAULT NULL, perm int(11) DEFAULT '1', create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Table structure for t_ds_resources -- ---------------------------- DROP TABLE IF EXISTS t_ds_resources; CREATE TABLE t_ds_resources ( id int(11) NOT NULL AUTO_INCREMENT, alias varchar(64) DEFAULT NULL, file_name varchar(64) DEFAULT NULL, description varchar(255) DEFAULT NULL, user_id int(11) DEFAULT NULL, type tinyint(4) DEFAULT NULL, size bigint(20) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, pid int(11) DEFAULT NULL, full_name varchar(64) DEFAULT NULL, is_directory tinyint(4) DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY t_ds_resources_un (full_name, type) ); -- ---------------------------- -- Records of t_ds_resources -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_schedules -- ---------------------------- DROP TABLE IF EXISTS t_ds_schedules; CREATE TABLE t_ds_schedules ( id int(11) NOT NULL AUTO_INCREMENT, process_definition_id int(11) NOT NULL, start_time datetime NOT NULL, end_time datetime NOT NULL, timezone_id varchar(40) DEFAULT NULL, crontab varchar(255) NOT NULL, failure_strategy tinyint(4) NOT NULL, user_id int(11) NOT NULL, release_state tinyint(4) NOT NULL, warning_type tinyint(4) NOT NULL, warning_group_id int(11) DEFAULT NULL, process_instance_priority int(11) DEFAULT NULL, worker_group varchar(64) DEFAULT '', environment_code bigint(20) DEFAULT '-1', create_time datetime NOT NULL, update_time datetime NOT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_schedules -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_session -- ---------------------------- DROP TABLE IF EXISTS t_ds_session; CREATE TABLE t_ds_session ( id varchar(64) NOT NULL, user_id int(11) DEFAULT NULL, ip varchar(45) DEFAULT NULL, last_login_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_session -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_task_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_task_instance; CREATE TABLE t_ds_task_instance ( id int(11) NOT NULL AUTO_INCREMENT, name varchar(255) DEFAULT NULL, task_type varchar(50) NOT NULL, task_code bigint(20) NOT NULL, task_definition_version int(11) DEFAULT NULL, process_instance_id int(11) DEFAULT NULL, state tinyint(4) DEFAULT NULL, submit_time datetime DEFAULT NULL, start_time datetime DEFAULT NULL, end_time datetime DEFAULT NULL, host varchar(135) DEFAULT NULL, execute_path varchar(200) DEFAULT NULL, log_path varchar(200) DEFAULT NULL, alert_flag tinyint(4) DEFAULT NULL, retry_times int(4) DEFAULT '0', pid int(4) DEFAULT NULL, app_link text, task_params text, flag tinyint(4) DEFAULT '1', retry_interval int(4) DEFAULT NULL, max_retry_times int(2) DEFAULT NULL, task_instance_priority int(11) DEFAULT NULL, worker_group varchar(64) DEFAULT NULL, environment_code 
bigint(20) DEFAULT '-1', environment_config text DEFAULT '', executor_id int(11) DEFAULT NULL, first_submit_time datetime DEFAULT NULL, delay_time int(4) DEFAULT '0', var_pool longtext, PRIMARY KEY (id), FOREIGN KEY (process_instance_id) REFERENCES t_ds_process_instance (id) ON DELETE CASCADE ); -- ---------------------------- -- Records of t_ds_task_instance -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_tenant -- ---------------------------- DROP TABLE IF EXISTS t_ds_tenant; CREATE TABLE t_ds_tenant ( id int(11) NOT NULL AUTO_INCREMENT, tenant_code varchar(64) DEFAULT NULL, description varchar(255) DEFAULT NULL, queue_id int(11) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_tenant -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_udfs -- ---------------------------- DROP TABLE IF EXISTS t_ds_udfs; CREATE TABLE t_ds_udfs ( id int(11) NOT NULL AUTO_INCREMENT, user_id int(11) NOT NULL, func_name varchar(100) NOT NULL, class_name varchar(255) NOT NULL, type tinyint(4) NOT NULL, arg_types varchar(255) DEFAULT NULL, database varchar(255) DEFAULT NULL, description varchar(255) DEFAULT NULL, resource_id int(11) NOT NULL, resource_name varchar(255) NOT NULL, create_time datetime NOT NULL, update_time datetime NOT NULL, PRIMARY KEY (id) ); -- ---------------------------- -- Records of t_ds_udfs -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_user -- ---------------------------- DROP TABLE IF EXISTS t_ds_user; CREATE TABLE t_ds_user ( id int(11) NOT NULL AUTO_INCREMENT, user_name varchar(64) DEFAULT NULL, user_password varchar(64) DEFAULT NULL, user_type tinyint(4) DEFAULT NULL, email varchar(64) DEFAULT NULL, phone varchar(11) DEFAULT NULL, tenant_id int(11) DEFAULT NULL, create_time datetime DEFAULT NULL, update_time datetime DEFAULT NULL, queue varchar(64) DEFAULT NULL, state int(1) DEFAULT 1, PRIMARY KEY (id), UNIQUE KEY user_name_unique (user_name) ); -- ---------------------------- -- Records of t_ds_user -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_worker_group -- ---------------------------- DROP TABLE IF EXISTS t_ds_worker_group; CREATE TABLE t_ds_worker_group ( id bigint(11) NOT NULL AUTO_INCREMENT, name varchar(255) NOT NULL, addr_list text NULL DEFAULT NULL, create_time datetime NULL DEFAULT NULL, update_time datetime NULL DEFAULT NULL, PRIMARY KEY (id), UNIQUE KEY name_unique (name) ); -- ---------------------------- -- Records of t_ds_worker_group -- ---------------------------- -- ---------------------------- -- Table structure for t_ds_version -- ---------------------------- DROP TABLE IF EXISTS t_ds_version; CREATE TABLE t_ds_version ( id int(11) NOT NULL AUTO_INCREMENT, version varchar(200) NOT NULL, PRIMARY KEY (id), UNIQUE KEY version_UNIQUE (version) ); -- ---------------------------- -- Records of t_ds_version -- ---------------------------- INSERT INTO t_ds_version VALUES ('1', '1.4.0'); -- ---------------------------- -- Records of t_ds_alertgroup -- ---------------------------- INSERT INTO t_ds_alertgroup(alert_instance_ids, create_user_id, group_name, description, create_time, update_time) VALUES ('1,2', 1, 'default admin warning group', 'default admin warning group', '2018-11-29 10:20:39', '2018-11-29 10:20:39'); -- ---------------------------- -- Records of t_ds_user -- 
---------------------------- INSERT INTO t_ds_user VALUES ('1', 'admin', '7ad2410b2f4c074479a8937a28a22b8f', '0', 'xxx@qq.com', '', '0', '2018-03-27 15:48:50', '2018-10-24 17:40:22', null, 1); -- ---------------------------- -- Table structure for t_ds_plugin_define -- ---------------------------- DROP TABLE IF EXISTS t_ds_plugin_define; CREATE TABLE t_ds_plugin_define ( id int NOT NULL AUTO_INCREMENT, plugin_name varchar(100) NOT NULL, plugin_type varchar(100) NOT NULL, plugin_params text, create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (id), UNIQUE KEY t_ds_plugin_define_UN (plugin_name,plugin_type) ); -- ---------------------------- -- Table structure for t_ds_alert_plugin_instance -- ---------------------------- DROP TABLE IF EXISTS t_ds_alert_plugin_instance; CREATE TABLE t_ds_alert_plugin_instance ( id int NOT NULL AUTO_INCREMENT, plugin_define_id int NOT NULL, plugin_instance_params text, create_time timestamp NULL DEFAULT CURRENT_TIMESTAMP, update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, instance_name varchar(200) DEFAULT NULL, PRIMARY KEY (id) ); -- -- Table structure for table t_ds_environment -- DROP TABLE IF EXISTS t_ds_environment; CREATE TABLE t_ds_environment ( id int NOT NULL AUTO_INCREMENT, code bigint(20) NOT NULL, name varchar(100) DEFAULT NULL, config text DEFAULT NULL, description text, operator int DEFAULT NULL, create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (id), UNIQUE KEY environment_name_unique (name), UNIQUE KEY environment_code_unique (code) ); -- -- Table structure for table t_ds_environment_worker_group_relation -- DROP TABLE IF EXISTS t_ds_environment_worker_group_relation; CREATE TABLE t_ds_environment_worker_group_relation ( id int NOT NULL AUTO_INCREMENT, environment_code bigint(20) NOT NULL, worker_group varchar(255) NOT NULL, operator int DEFAULT NULL, create_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, update_time timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (id) , UNIQUE KEY environment_worker_group_unique (environment_code,worker_group) );
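The dolphinscheduler_h2.sql dump above both defines the schema and seeds a few bootstrap rows: the `default` queue, the default admin warning group, the `admin` user, and the `1.4.0` row in t_ds_version. A quick way to sanity-check a dump like this is to run it against a throwaway in-memory H2 database and read one of the seeded rows back. The sketch below assumes the H2 driver is on the classpath and the script is readable at the relative path `sql/dolphinscheduler_h2.sql`; the JDBC URL, the MySQL compatibility mode, and the credentials are illustrative choices, not project configuration.

```java
import java.io.FileReader;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

import org.h2.tools.RunScript;

// Minimal sketch: load the schema dump into a throwaway in-memory H2
// database (MySQL compatibility mode, since the dump uses MySQL-flavored
// types) and read back one of the seeded rows. URL, credentials and the
// script path are illustrative assumptions.
public class SchemaSmokeTest {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:h2:mem:dolphinscheduler;MODE=MySQL;DB_CLOSE_DELAY=-1", "sa", "")) {
            // Executes every statement in the dump, including the seed INSERTs.
            RunScript.execute(conn, new FileReader("sql/dolphinscheduler_h2.sql"));
            try (Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("select version from t_ds_version")) {
                while (rs.next()) {
                    // The dump seeds exactly one row: '1.4.0'.
                    System.out.println("schema version: " + rs.getString(1));
                }
            }
        }
    }
}
```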
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,245
[Bug] [API] TaskDefinitionLog save error when creating a workflow
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened ## Branch: dev ### When I create a workflow and click the save button, the back end reports an exception: ```text 2021-09-17 15:23:01.231 ERROR 57816 --- [tp1516958404-78] o.a.d.a.exceptions.ApiExceptionHandler : 创建工作流错误 org.mybatis.spring.MyBatisSystemException: nested exception is org.apache.ibatis.binding.BindingException: Parameter 'taskDefinition' not found. Available parameters are [taskDefinitionLogs, param1] at org.mybatis.spring.MyBatisExceptionTranslator.translateExceptionIfPossible(MyBatisExceptionTranslator.java:78) ~[mybatis-spring-2.0.2.jar:2.0.2] at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:440) ~[mybatis-spring-2.0.2.jar:2.0.2] at com.sun.proxy.$Proxy10.insert(Unknown Source) ~[na:na] at org.mybatis.spring.SqlSessionTemplate.insert(SqlSessionTemplate.java:271) ~[mybatis-spring-2.0.2.jar:2.0.2] at com.baomidou.mybatisplus.core.override.MybatisMapperMethod.execute(MybatisMapperMethod.java:58) ~[mybatis-plus-core-3.2.0.jar:3.2.0] at com.baomidou.mybatisplus.core.override.MybatisMapperProxy.invoke(MybatisMapperProxy.java:61) ~[mybatis-plus-core-3.2.0.jar:3.2.0] at com.sun.proxy.$Proxy133.batchInsert(Unknown Source) ~[na:na] at org.apache.dolphinscheduler.service.process.ProcessService.saveTaskDefine(ProcessService.java:2205) ~[classes/:na] at org.apache.dolphinscheduler.service.process.ProcessService$$FastClassBySpringCGLIB$$ed138739.invoke(<generated>) ~[classes/:na] at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) ~[spring-core-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:687) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.apache.dolphinscheduler.service.process.ProcessService$$EnhancerBySpringCGLIB$$29c15cf5.saveTaskDefine(<generated>) ~[classes/:na] at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.createTaskDefinition(ProcessDefinitionServiceImpl.java:249) ~[classes/:na] at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl.createProcessDefinition(ProcessDefinitionServiceImpl.java:199) ~[classes/:na] at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$FastClassBySpringCGLIB$$e8e34ed9.invoke(<generated>) ~[classes/:na] at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) ~[spring-core-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:752) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.transaction.interceptor.TransactionAspectSupport.invokeWithinTransaction(TransactionAspectSupport.java:295) ~[spring-tx-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.transaction.interceptor.TransactionInterceptor.invoke(TransactionInterceptor.java:98) ~[spring-tx-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at
org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:691) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl$$EnhancerBySpringCGLIB$$9efa77d5.createProcessDefinition(<generated>) ~[classes/:na] at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController.createProcessDefinition(ProcessDefinitionController.java:130) ~[classes/:na] at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$FastClassBySpringCGLIB$$dc9bf5db.invoke(<generated>) ~[classes/:na] at org.springframework.cglib.proxy.MethodProxy.invoke(MethodProxy.java:218) ~[spring-core-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.framework.CglibAopProxy$CglibMethodInvocation.invokeJoinpoint(CglibAopProxy.java:752) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:163) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.aspectj.MethodInvocationProceedingJoinPoint.proceed(MethodInvocationProceedingJoinPoint.java:88) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.apache.dolphinscheduler.api.aspect.AccessLogAspect.doAround(AccessLogAspect.java:87) ~[classes/:na] at sun.reflect.GeneratedMethodAccessor169.invoke(Unknown Source) ~[na:na] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_292] at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_292] at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethodWithGivenArgs(AbstractAspectJAdvice.java:644) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.aspectj.AbstractAspectJAdvice.invokeAdviceMethod(AbstractAspectJAdvice.java:633) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.aspectj.AspectJAroundAdvice.invoke(AspectJAroundAdvice.java:70) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:175) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.interceptor.ExposeInvocationInterceptor.invoke(ExposeInvocationInterceptor.java:93) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.framework.ReflectiveMethodInvocation.proceed(ReflectiveMethodInvocation.java:186) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.aop.framework.CglibAopProxy$DynamicAdvisedInterceptor.intercept(CglibAopProxy.java:691) ~[spring-aop-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.apache.dolphinscheduler.api.controller.ProcessDefinitionController$$EnhancerBySpringCGLIB$$e38856c1.createProcessDefinition(<generated>) ~[classes/:na] at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:1.8.0_292] at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:1.8.0_292] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_292] at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_292] at org.springframework.web.method.support.InvocableHandlerMethod.doInvoke(InvocableHandlerMethod.java:190) ~[spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.method.support.InvocableHandlerMethod.invokeForRequest(InvocableHandlerMethod.java:138) ~[spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at 
org.springframework.web.servlet.mvc.method.annotation.ServletInvocableHandlerMethod.invokeAndHandle(ServletInvocableHandlerMethod.java:105) ~[spring-webmvc-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.invokeHandlerMethod(RequestMappingHandlerAdapter.java:892) ~[spring-webmvc-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.servlet.mvc.method.annotation.RequestMappingHandlerAdapter.handleInternal(RequestMappingHandlerAdapter.java:797) ~[spring-webmvc-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.servlet.mvc.method.AbstractHandlerMethodAdapter.handle(AbstractHandlerMethodAdapter.java:87) ~[spring-webmvc-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.servlet.DispatcherServlet.doDispatch(DispatcherServlet.java:1040) ~[spring-webmvc-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.servlet.DispatcherServlet.doService(DispatcherServlet.java:943) ~[spring-webmvc-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.servlet.FrameworkServlet.processRequest(FrameworkServlet.java:1006) [spring-webmvc-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.servlet.FrameworkServlet.doPost(FrameworkServlet.java:909) [spring-webmvc-5.1.19.RELEASE.jar:5.1.19.RELEASE] at javax.servlet.http.HttpServlet.service(HttpServlet.java:707) [javax.servlet-api-3.1.0.jar:3.1.0] at org.springframework.web.servlet.FrameworkServlet.service(FrameworkServlet.java:883) [spring-webmvc-5.1.19.RELEASE.jar:5.1.19.RELEASE] at javax.servlet.http.HttpServlet.service(HttpServlet.java:790) [javax.servlet-api-3.1.0.jar:3.1.0] at org.eclipse.jetty.servlet.ServletHolder.handle(ServletHolder.java:763) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler$ChainEnd.doFilter(ServletHandler.java:1633) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at com.github.xiaoymin.swaggerbootstrapui.filter.SecurityBasicAuthFilter.doFilter(SecurityBasicAuthFilter.java:84) [swagger-bootstrap-ui-1.9.3.jar:na] at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at com.github.xiaoymin.swaggerbootstrapui.filter.ProductionSecurityFilter.doFilter(ProductionSecurityFilter.java:53) [swagger-bootstrap-ui-1.9.3.jar:na] at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.springframework.web.filter.CorsFilter.doFilterInternal(CorsFilter.java:97) [spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) [spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.springframework.web.filter.RequestContextFilter.doFilterInternal(RequestContextFilter.java:100) [spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) 
[spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.springframework.web.filter.FormContentFilter.doFilterInternal(FormContentFilter.java:93) [spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) [spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.springframework.web.filter.HiddenHttpMethodFilter.doFilterInternal(HiddenHttpMethodFilter.java:94) [spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) [spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.springframework.web.filter.CharacterEncodingFilter.doFilterInternal(CharacterEncodingFilter.java:201) [spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.springframework.web.filter.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:119) [spring-web-5.1.19.RELEASE.jar:5.1.19.RELEASE] at org.eclipse.jetty.servlet.FilterHolder.doFilter(FilterHolder.java:193) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler$Chain.doFilter(ServletHandler.java:1609) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:561) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:602) [jetty-security-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:235) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1612) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:233) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1434) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:188) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:501) [jetty-servlet-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1582) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at 
org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:186) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1349) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:766) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:127) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.Server.handle(Server.java:516) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.HttpChannel.lambda$handle$1(HttpChannel.java:383) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.HttpChannel.dispatch(HttpChannel.java:556) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:375) [jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:273) ~[jetty-server-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:311) ~[jetty-io-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:105) ~[jetty-io-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.io.ChannelEndPoint$1.run(ChannelEndPoint.java:104) ~[jetty-io-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:336) ~[jetty-util-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:313) ~[jetty-util-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) ~[jetty-util-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:129) ~[jetty-util-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:375) ~[jetty-util-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:773) ~[jetty-util-9.4.33.v20201020.jar:9.4.33.v20201020] at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:905) ~[jetty-util-9.4.33.v20201020.jar:9.4.33.v20201020] at java.lang.Thread.run(Thread.java:748) ~[na:1.8.0_292] Caused by: org.apache.ibatis.binding.BindingException: Parameter 'taskDefinition' not found. 
Available parameters are [taskDefinitionLogs, param1] at org.apache.ibatis.binding.MapperMethod$ParamMap.get(MapperMethod.java:212) ~[mybatis-3.5.2.jar:3.5.2] at org.apache.ibatis.reflection.wrapper.MapWrapper.get(MapWrapper.java:45) ~[mybatis-3.5.2.jar:3.5.2] at org.apache.ibatis.reflection.MetaObject.getValue(MetaObject.java:122) ~[mybatis-3.5.2.jar:3.5.2] at org.apache.ibatis.reflection.MetaObject.metaObjectForProperty(MetaObject.java:145) ~[mybatis-3.5.2.jar:3.5.2] at org.apache.ibatis.reflection.MetaObject.getValue(MetaObject.java:115) ~[mybatis-3.5.2.jar:3.5.2] at com.baomidou.mybatisplus.core.MybatisDefaultParameterHandler.setParameters(MybatisDefaultParameterHandler.java:220) ~[mybatis-plus-core-3.2.0.jar:3.2.0] at org.apache.ibatis.executor.statement.PreparedStatementHandler.parameterize(PreparedStatementHandler.java:94) ~[mybatis-3.5.2.jar:3.5.2] at org.apache.ibatis.executor.statement.RoutingStatementHandler.parameterize(RoutingStatementHandler.java:64) ~[mybatis-3.5.2.jar:3.5.2] at sun.reflect.GeneratedMethodAccessor140.invoke(Unknown Source) ~[na:na] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_292] at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_292] at org.apache.ibatis.plugin.Plugin.invoke(Plugin.java:63) ~[mybatis-3.5.2.jar:3.5.2] at com.sun.proxy.$Proxy17.parameterize(Unknown Source) ~[na:na] at com.baomidou.mybatisplus.core.executor.MybatisSimpleExecutor.prepareStatement(MybatisSimpleExecutor.java:99) ~[mybatis-plus-core-3.2.0.jar:3.2.0] at com.baomidou.mybatisplus.core.executor.MybatisSimpleExecutor.doUpdate(MybatisSimpleExecutor.java:53) ~[mybatis-plus-core-3.2.0.jar:3.2.0] at org.apache.ibatis.executor.BaseExecutor.update(BaseExecutor.java:117) ~[mybatis-3.5.2.jar:3.5.2] at org.apache.ibatis.session.defaults.DefaultSqlSession.update(DefaultSqlSession.java:197) ~[mybatis-3.5.2.jar:3.5.2] at org.apache.ibatis.session.defaults.DefaultSqlSession.insert(DefaultSqlSession.java:184) ~[mybatis-3.5.2.jar:3.5.2] at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[na:1.8.0_292] at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) ~[na:1.8.0_292] at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[na:1.8.0_292] at java.lang.reflect.Method.invoke(Method.java:498) ~[na:1.8.0_292] at org.mybatis.spring.SqlSessionTemplate$SqlSessionInterceptor.invoke(SqlSessionTemplate.java:426) ~[mybatis-spring-2.0.2.jar:2.0.2] ... 116 common frames omitted ``` ### What you expected to happen Creating the workflow should succeed. ### How to reproduce 1. Create a project 2. Create a new workflow 3. Add a node 4. Save the workflow 5. The error occurs ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6245
https://github.com/apache/dolphinscheduler/pull/6246
fced3892ee20d64c3eafe187cbd94508ac339a98
58da46b1291aa8bd2b6e73b3455a09188f40e145
"2021-09-17T07:25:01Z"
java
"2021-09-17T09:07:04Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/TaskDefinitionLogMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper"> <sql id="baseSql"> id, code, name, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, environment_code, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, operator, operate_time, create_time, update_time </sql> <select id="queryMaxVersionForDefinition" resultType="java.lang.Integer"> select max(version) from t_ds_task_definition_log WHERE code = #{code} </select> <select id="queryByDefinitionCodeAndVersion" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog"> select <include refid="baseSql"/> from t_ds_task_definition_log WHERE code = #{code} and version = #{version} </select> <select id="queryByTaskDefinitions" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog"> select <include refid="baseSql"/> from t_ds_task_definition_log WHERE 1 = 1 <if test="taskDefinitions != null and taskDefinitions.size != 0"> and <foreach collection="taskDefinitions" index="index" item="item" open="(" separator=" or " close=")"> (code = #{item.code} and version = #{item.version}) </foreach> </if> </select> <insert id="batchInsert"> insert into t_ds_task_definition_log (code, name, version, description, project_code, user_id, task_type, task_params, flag, task_priority, worker_group, environment_code, fail_retry_times, fail_retry_interval, timeout_flag, timeout_notify_strategy, timeout, delay_time, resource_ids, operator, operate_time, create_time, update_time) values <foreach collection="taskDefinitionLogs" item="taskDefinitionLog" separator=","> (#{taskDefinitionLog.code},#{taskDefinitionLog.name},#{taskDefinitionLog.version},#{taskDefinitionLog.description}, #{taskDefinitionLog.projectCode},#{taskDefinitionLog.userId},#{taskDefinitionLog.taskType},#{taskDefinitionLog.taskParams}, #{taskDefinitionLog.flag},#{taskDefinitionLog.taskPriority},#{taskDefinitionLog.workerGroup},#{taskDefinition.environmentCode}, #{taskDefinitionLog.failRetryTimes},#{taskDefinitionLog.failRetryInterval},#{taskDefinitionLog.timeoutFlag},#{taskDefinitionLog.timeoutNotifyStrategy}, #{taskDefinitionLog.timeout},#{taskDefinitionLog.delayTime},#{taskDefinitionLog.resourceIds},#{taskDefinitionLog.operator},#{taskDefinitionLog.operateTime}, #{taskDefinitionLog.createTime},#{taskDefinitionLog.updateTime}) </foreach> </insert> <delete id="deleteByCodeAndVersion"> delete from t_ds_task_definition_log where code = #{code} and version = #{version} </delete> <select 
id="queryTaskDefinitionVersionsPaging" resultType="org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog"> select <include refid="baseSql"/> from t_ds_task_definition_log where code = #{code} order by version desc </select> </mapper>
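The stack trace in the report points straight at the `batchInsert` statement in the mapper above: its `<foreach>` declares item="taskDefinitionLog", but the environment_code placeholder is written as #{taskDefinition.environmentCode}, a name that is not in scope, which is exactly why MyBatis raises "Parameter 'taskDefinition' not found. Available parameters are [taskDefinitionLogs, param1]". The annotation-style sketch below is a hypothetical, two-column stand-in (not the project's real mapper interface) that shows the rule the fix has to restore: every #{...} inside the loop must start with the declared item name.

```java
import java.util.List;

import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Param;

// Hypothetical two-column stand-in for the batchInsert statement above;
// only the parameter-binding rule matters here, not the full column list.
public interface TaskDefinitionLogSketchMapper {

    @Insert({"<script>",
        "insert into t_ds_task_definition_log (code, environment_code) values",
        "<foreach collection='taskDefinitionLogs' item='taskDefinitionLog' separator=','>",
        // Correct: both placeholders start with the declared item name.
        // Writing #{taskDefinition.environmentCode} here instead would
        // reproduce the reported BindingException, because no parameter
        // named 'taskDefinition' is in scope inside the foreach.
        "(#{taskDefinitionLog.code}, #{taskDefinitionLog.environmentCode})",
        "</foreach>",
        "</script>"})
    int batchInsert(@Param("taskDefinitionLogs") List<TaskDefinitionLog> taskDefinitionLogs);
}
```

The same rule applies unchanged in XML mappers, so the stray `taskDefinition` prefix on `environmentCode` in the `batchInsert` block above appears to be the single token the linked fix (PR #6246) needs to rename.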
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,277
[Bug] [Master] tasks submit time error.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Branch: dev With 10 parallel tasks in one DAG, running the DAG shows that the tasks' submit times are staggered at 5-second intervals. ### What you expected to happen The submit times of the parallel tasks should be the same. ### How to reproduce Put 10 parallel tasks in one DAG, run the DAG, and check the submit times of the tasks. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
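Ready-to-run sibling tasks picking up submit times 5 seconds apart reads like the signature of stamping one task per pass of a fixed-interval scan loop rather than stamping the whole ready set in a single pass. The sketch below only illustrates that difference; it is not the master's actual submission code, and the 5-second interval is taken from the report, not from any configuration.

```java
import java.util.Date;
import java.util.List;

// Illustrative only: why stamping one ready task per fixed-interval scan
// pass staggers submit times, while stamping the whole ready batch in a
// single pass keeps them identical. Not the actual master implementation.
public class SubmitTimeSketch {

    static class Task {
        final String name;
        Date submitTime;
        Task(String name) { this.name = name; }
    }

    // One task per pass: task i is stamped roughly i * intervalMs later.
    static void submitOnePerPass(List<Task> ready, long intervalMs) throws InterruptedException {
        for (Task task : ready) {
            task.submitTime = new Date();
            Thread.sleep(intervalMs); // stands in for waiting for the next scan pass
        }
    }

    // Whole batch in one pass: every sibling shares the same submit time.
    static void submitBatch(List<Task> ready) {
        Date now = new Date();
        for (Task task : ready) {
            task.submitTime = now;
        }
    }

    public static void main(String[] args) throws InterruptedException {
        List<Task> staggered = List.of(new Task("t1"), new Task("t2"), new Task("t3"));
        submitOnePerPass(staggered, 5000L); // ~5 s apart, the reported symptom
        staggered.forEach(t -> System.out.println(t.name + " -> " + t.submitTime));

        List<Task> batched = List.of(new Task("t1"), new Task("t2"), new Task("t3"));
        submitBatch(batched); // identical, the expected behavior
        batched.forEach(t -> System.out.println(t.name + " -> " + t.submitTime));
    }
}
```

Stamping the whole ready batch once per scan keeps sibling submit times identical, which is the behavior the report expects.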
https://github.com/apache/dolphinscheduler/issues/6277
https://github.com/apache/dolphinscheduler/pull/6278
540fd9dc0844a4e44dc5485d6001eb0dd18c8257
649723132bc8276ef221c37e7014c0cf6772be60
"2021-09-20T04:03:32Z"
java
"2021-09-20T06:57:53Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID; import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS; import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS; import static java.util.stream.Collectors.toSet; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.DateInterval; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.process.ResourceInfo; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import 
org.apache.dolphinscheduler.common.utils.TaskParametersUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ErrorCommand; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper; import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.service.log.LogClientService; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.facebook.presto.jdbc.internal.guava.collect.Lists; import 
com.fasterxml.jackson.databind.node.ObjectNode;

/**
 * process related DAO operations: wraps the mappers needed to handle commands,
 * process instances and task instances.
 */
@Component
public class ProcessService {

    private final Logger logger = LoggerFactory.getLogger(getClass());

    private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
            ExecutionStatus.RUNNING_EXECUTION.ordinal(),
            ExecutionStatus.DELAY_EXECUTION.ordinal(),
            ExecutionStatus.READY_PAUSE.ordinal(),
            ExecutionStatus.READY_STOP.ordinal()};

    @Autowired
    private UserMapper userMapper;

    @Autowired
    private ProcessDefinitionMapper processDefineMapper;

    @Autowired
    private ProcessDefinitionLogMapper processDefineLogMapper;

    @Autowired
    private ProcessInstanceMapper processInstanceMapper;

    @Autowired
    private DataSourceMapper dataSourceMapper;

    @Autowired
    private ProcessInstanceMapMapper processInstanceMapMapper;

    @Autowired
    private TaskInstanceMapper taskInstanceMapper;

    @Autowired
    private CommandMapper commandMapper;

    @Autowired
    private ScheduleMapper scheduleMapper;

    @Autowired
    private UdfFuncMapper udfFuncMapper;

    @Autowired
    private ResourceMapper resourceMapper;

    @Autowired
    private ResourceUserMapper resourceUserMapper;

    @Autowired
    private ErrorCommandMapper errorCommandMapper;

    @Autowired
    private TenantMapper tenantMapper;

    @Autowired
    private ProjectMapper projectMapper;

    @Autowired
    private TaskDefinitionMapper taskDefinitionMapper;

    @Autowired
    private TaskDefinitionLogMapper taskDefinitionLogMapper;

    @Autowired
    private ProcessTaskRelationMapper processTaskRelationMapper;

    @Autowired
    private ProcessTaskRelationLogMapper processTaskRelationLogMapper;

    @Autowired
    private EnvironmentMapper environmentMapper;

    /**
     * handle Command (construct ProcessInstance from Command), wrapped in transaction
     *
     * @param logger logger
     * @param host host
     * @param validThreadNum validThreadNum
     * @param command found command
     * @return process instance
     */
    @Transactional(rollbackFor = Exception.class)
    public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) {
        ProcessInstance processInstance = constructProcessInstance(command, host);
        // cannot construct process instance, return null
        if (processInstance == null) {
            logger.error("scan command, command parameter is invalid: {}", command);
            moveToErrorCommand(command, "process instance is null");
            return null;
        }
        if (!checkThreadNum(command, validThreadNum)) {
            logger.info("there is not enough thread for this command: {}", command);
            return setWaitingThreadProcess(command, processInstance);
        }
        processInstance.setCommandType(command.getCommandType());
        processInstance.addHistoryCmd(command.getCommandType());
        saveProcessInstance(processInstance);
        this.setSubProcessParam(processInstance);
        this.commandMapper.deleteById(command.getId());
        return processInstance;
    }

    /**
     * save error command, and delete original command
     *
     * @param command command
     * @param message message
     */
    @Transactional(rollbackFor = Exception.class)
    public void moveToErrorCommand(Command command, String message) {
        ErrorCommand errorCommand = new ErrorCommand(command, message);
        this.errorCommandMapper.insert(errorCommand);
        this.commandMapper.deleteById(command.getId());
    }

    /**
     * set process waiting thread
     *
     * @param command command
     * @param processInstance processInstance
     * @return process instance
     */
    private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
        processInstance.setState(ExecutionStatus.WAITING_THREAD);
        if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) {
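            // only non-recover commands are appended to the instance's history command list;
            // a RECOVER_WAITING_THREAD command re-enters this path for an instance whose
            // original command was already recorded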
processInstance.addHistoryCmd(command.getCommandType()); } saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); createRecoveryWaitingThreadCommand(command, processInstance); return null; } /** * check thread num * * @param command command * @param validThreadNum validThreadNum * @return if thread is enough */ private boolean checkThreadNum(Command command, int validThreadNum) { int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode()); return validThreadNum >= commandThreadCount; } /** * insert one command * * @param command command * @return create result */ public int createCommand(Command command) { int result = 0; if (command != null) { result = commandMapper.insert(command); } return result; } /** * find one command from queue list * * @return command */ public Command findOneCommand() { return commandMapper.getOneToRun(); } /** * get command page * * @param pageSize * @param pageNumber * @return */ public List<Command> findCommandPage(int pageSize, int pageNumber) { Page<Command> commandPage = new Page<>(pageNumber, pageSize); return commandMapper.queryCommandPage(commandPage).getRecords(); } /** * check the input command exists in queue list * * @param command command * @return create command result */ public boolean verifyIsNeedCreateCommand(Command command) { boolean isNeedCreate = true; EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class); cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1); cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1); cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1); CommandType commandType = command.getCommandType(); if (cmdTypeMap.containsKey(commandType)) { ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam()); int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt(); List<Command> commands = commandMapper.selectList(null); // for all commands for (Command tmpCommand : commands) { if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) { ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam()); if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) { isNeedCreate = false; break; } } } } return isNeedCreate; } /** * find process instance detail by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceDetailById(int processId) { return processInstanceMapper.queryDetailById(processId); } /** * get task node list by definitionId */ public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) { ProcessDefinition processDefinition = processDefineMapper.selectById(defineId); if (processDefinition == null) { logger.error("process define not exists"); return new ArrayList<>(); } List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) { if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); return new ArrayList<>(taskDefinitionLogs); } /** * find process instance by id * * @param processId processId * @return process 
instance */ public ProcessInstance findProcessInstanceById(int processId) { return processInstanceMapper.selectById(processId); } /** * find process define by id. * * @param processDefinitionId processDefinitionId * @return process definition */ public ProcessDefinition findProcessDefineById(int processDefinitionId) { return processDefineMapper.selectById(processDefinitionId); } /** * find process define by code and version. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); if (processDefinition == null || processDefinition.getVersion() != version) { processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version); if (processDefinition != null) { processDefinition.setId(0); } } return processDefinition; } /** * find process define by code. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) { return processDefineMapper.queryByCode(processDefinitionCode); } /** * delete work process instance by id * * @param processInstanceId processInstanceId * @return delete process instance result */ public int deleteWorkProcessInstanceById(int processInstanceId) { return processInstanceMapper.deleteById(processInstanceId); } /** * delete all sub process by parent instance id * * @param processInstanceId processInstanceId * @return delete all sub process instance result */ public int deleteAllSubWorkProcessByParentId(int processInstanceId) { List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId); for (Integer subId : subProcessIdList) { deleteAllSubWorkProcessByParentId(subId); deleteWorkProcessMapByParentId(subId); removeTaskLogFile(subId); deleteWorkProcessInstanceById(subId); } return 1; } /** * remove task log file * * @param processInstanceId processInstanceId */ public void removeTaskLogFile(Integer processInstanceId) { List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId); if (CollectionUtils.isEmpty(taskInstanceList)) { return; } try (LogClientService logClient = new LogClientService()) { for (TaskInstance taskInstance : taskInstanceList) { String taskLogPath = taskInstance.getLogPath(); if (StringUtils.isEmpty(taskInstance.getHost())) { continue; } int port = Constants.RPC_PORT; String ip = ""; try { ip = Host.of(taskInstance.getHost()).getIp(); } catch (Exception e) { // compatible old version ip = taskInstance.getHost(); } // remove task log from loggerserver logClient.removeTaskLog(ip, port, taskLogPath); } } } /** * calculate sub process number in the process define. * * @param processDefinitionCode processDefinitionCode * @return process thread num count */ private Integer workProcessThreadNumCount(long processDefinitionCode) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); List<Integer> ids = new ArrayList<>(); recurseFindSubProcessId(processDefinition.getId(), ids); return ids.size() + 1; } /** * recursive query sub process definition id by parent id. 
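 * <p>A minimal usage sketch (hypothetical caller code; {@code processService} is an
 * injected instance of this class):
 * <pre>{@code
 * List<Integer> ids = new ArrayList<>();
 * processService.recurseFindSubProcessId(processDefinition.getId(), ids);
 * int threadsNeeded = ids.size() + 1; // every sub process definition plus the parent itself
 * }</pre>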
 *
 * @param parentId parentId
 * @param ids ids
 */
public void recurseFindSubProcessId(int parentId, List<Integer> ids) {
    List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId);
    if (taskNodeList != null && !taskNodeList.isEmpty()) {
        for (TaskDefinition taskNode : taskNodeList) {
            String parameter = taskNode.getTaskParams();
            ObjectNode parameterJson = JSONUtils.parseObject(parameter);
            if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) {
                SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
                ids.add(subProcessParam.getProcessDefinitionId());
                recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids);
            }
        }
    }
}

/**
 * create a recovery waiting thread command when the thread pool is not enough for the process instance.
 * sub work process instances do not need to create recovery commands.
 * create the recovery waiting thread command and delete the origin command at the same time.
 * if the recovery command already exists, only update the field update_time.
 *
 * @param originCommand originCommand
 * @param processInstance processInstance
 */
public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
    // sub process does not need to create a wait command
    if (processInstance.getIsSubProcess() == Flag.YES) {
        if (originCommand != null) {
            commandMapper.deleteById(originCommand.getId());
        }
        return;
    }
    Map<String, String> cmdParam = new HashMap<>();
    cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId()));
    // process instance quit by "waiting thread" state
    if (originCommand == null) {
        Command command = new Command(
                CommandType.RECOVER_WAITING_THREAD,
                processInstance.getTaskDependType(),
                processInstance.getFailureStrategy(),
                processInstance.getExecutorId(),
                processInstance.getProcessDefinition().getCode(),
                JSONUtils.toJsonString(cmdParam),
                processInstance.getWarningType(),
                processInstance.getWarningGroupId(),
                processInstance.getScheduleTime(),
                processInstance.getWorkerGroup(),
                processInstance.getEnvironmentCode(),
                processInstance.getProcessInstancePriority()
        );
        saveCommand(command);
        return;
    }
    // update the command time if the current command is recovered from waiting
    if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) {
        originCommand.setUpdateTime(new Date());
        saveCommand(originCommand);
    } else {
        // delete the old command and create a new waiting thread command
        commandMapper.deleteById(originCommand.getId());
        originCommand.setId(0);
        originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD);
        originCommand.setUpdateTime(new Date());
        originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
        originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
        saveCommand(originCommand);
    }
}

/**
 * get schedule time from command
 *
 * @param command command
 * @param cmdParam cmdParam map
 * @return date
 */
private Date getScheduleTime(Command command, Map<String, String> cmdParam) {
    Date scheduleTime = command.getScheduleTime();
    if (scheduleTime == null
            && cmdParam != null
            && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
        scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
    }
    return scheduleTime;
}

/**
 * generate a new work process instance from command.
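 * <p>The new instance starts in RUNNING_EXECUTION state and its global params are
 * "cured" against the schedule time, i.e. (a sketch of the call made below):
 * <pre>{@code
 * processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
 *         processDefinition.getGlobalParamMap(),
 *         processDefinition.getGlobalParamList(),
 *         getCommandTypeIfComplement(processInstance, command),
 *         processInstance.getScheduleTime()));
 * }</pre>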
* * @param processDefinition processDefinition * @param command command * @param cmdParam cmdParam map * @return process instance */ private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition, Command command, Map<String, String> cmdParam) { ProcessInstance processInstance = new ProcessInstance(processDefinition); processInstance.setProcessDefinitionCode(processDefinition.getCode()); processInstance.setProcessDefinitionVersion(processDefinition.getVersion()); processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setRecovery(Flag.NO); processInstance.setStartTime(new Date()); processInstance.setRunTimes(1); processInstance.setMaxTryTimes(0); //processInstance.setProcessDefinitionId(command.getProcessDefinitionId()); processInstance.setCommandParam(command.getCommandParam()); processInstance.setCommandType(command.getCommandType()); processInstance.setIsSubProcess(Flag.NO); processInstance.setTaskDependType(command.getTaskDependType()); processInstance.setFailureStrategy(command.getFailureStrategy()); processInstance.setExecutorId(command.getExecutorId()); WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType(); processInstance.setWarningType(warningType); Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId(); processInstance.setWarningGroupId(warningGroupId); // schedule time Date scheduleTime = getScheduleTime(command, cmdParam); if (scheduleTime != null) { processInstance.setScheduleTime(scheduleTime); } processInstance.setCommandStartTime(command.getStartTime()); processInstance.setLocations(processDefinition.getLocations()); // reset global params while there are start parameters setGlobalParamIfCommanded(processDefinition, cmdParam); // curing global params processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), getCommandTypeIfComplement(processInstance, command), processInstance.getScheduleTime())); // set process instance priority processInstance.setProcessInstancePriority(command.getProcessInstancePriority()); String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup(); processInstance.setWorkerGroup(workerGroup); processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? 
                -1 : command.getEnvironmentCode());
        processInstance.setTimeout(processDefinition.getTimeout());
        processInstance.setTenantId(processDefinition.getTenantId());
        return processInstance;
    }

    private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) {
        // get start params from command param
        Map<String, String> startParamMap = new HashMap<>();
        if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) {
            String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS);
            startParamMap = JSONUtils.toMap(startParamJson);
        }
        Map<String, String> fatherParamMap = new HashMap<>();
        if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) {
            String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS);
            fatherParamMap = JSONUtils.toMap(fatherParamJson);
        }
        startParamMap.putAll(fatherParamMap);
        // set start param into global params
        if (startParamMap.size() > 0 && processDefinition.getGlobalParamMap() != null) {
            for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) {
                String val = startParamMap.get(param.getKey());
                if (val != null) {
                    param.setValue(val);
                }
            }
        }
    }

    /**
     * get process tenant.
     * if there is a tenant id in the definition, use the tenant of the definition;
     * if there is no tenant id in the definition, or the tenant does not exist,
     * use the definition creator's tenant.
     *
     * @param tenantId tenantId
     * @param userId userId
     * @return tenant
     */
    public Tenant getTenantForProcess(int tenantId, int userId) {
        Tenant tenant = null;
        if (tenantId >= 0) {
            tenant = tenantMapper.queryById(tenantId);
        }
        if (userId == 0) {
            return null;
        }
        if (tenant == null) {
            User user = userMapper.selectById(userId);
            tenant = tenantMapper.queryById(user.getTenantId());
        }
        return tenant;
    }

    /**
     * get an environment:
     * use the code of the environment to find an environment.
     *
     * @param environmentCode environmentCode
     * @return Environment
     */
    public Environment findEnvironmentByCode(Long environmentCode) {
        Environment environment = null;
        if (environmentCode != null && environmentCode >= 0) {
            environment = environmentMapper.queryByEnvironmentCode(environmentCode);
        }
        return environment;
    }

    /**
     * check whether the command parameters are valid
     *
     * @param command command
     * @param cmdParam cmdParam map
     * @return whether command param is valid
     */
    private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) {
        if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) {
            if (cmdParam == null
                    || !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES)
                    || cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) {
                logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
                return false;
            }
        }
        return true;
    }

    /**
     * construct process instance according to one command.
     *
     * @param command command
     * @param host host
     * @return process instance
     */
    private ProcessInstance constructProcessInstance(Command command, String host) {
        ProcessInstance processInstance;
        CommandType commandType = command.getCommandType();
        Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
        ProcessDefinition processDefinition = getProcessDefinitionByCommand(command.getProcessDefinitionCode(), cmdParam);
        if (processDefinition == null) {
            logger.error("cannot find the work process define! define code : {}", command.getProcessDefinitionCode());
            return null;
        }
        if (cmdParam != null) {
            int processInstanceId = 0;
            // recover from failure or pause tasks
            if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
                String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING);
                processInstanceId = Integer.parseInt(processId);
                if (processInstanceId == 0) {
                    logger.error("command parameter is invalid, [ ProcessInstanceId ] is 0");
                    return null;
                }
            } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
                // sub process map
                String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS);
                processInstanceId = Integer.parseInt(pId);
            } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
                // waiting thread command
                String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD);
                processInstanceId = Integer.parseInt(pId);
            }
            if (processInstanceId == 0) {
                processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
            } else {
                processInstance = this.findProcessInstanceDetailById(processInstanceId);
                if (processInstance == null) {
                    return processInstance;
                }
                CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command);
                // reset global params when a repeat running is requested by cmdParam
                if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) {
                    setGlobalParamIfCommanded(processDefinition, cmdParam);
                }
                // recalculate global parameters after the rerun
                processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
                        processDefinition.getGlobalParamMap(),
                        processDefinition.getGlobalParamList(),
                        commandTypeIfComplement,
                        processInstance.getScheduleTime()));
                processInstance.setProcessDefinition(processDefinition);
            }
            // reset command parameter
            if (processInstance.getCommandParam() != null) {
                Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam());
                for (Map.Entry<String, String> entry : processCmdParam.entrySet()) {
                    if (!cmdParam.containsKey(entry.getKey())) {
                        cmdParam.put(entry.getKey(), entry.getValue());
                    }
                }
            }
            // reset command parameter if sub process
            if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
                processInstance.setCommandParam(command.getCommandParam());
            }
        } else {
            // generate one new process instance
            processInstance = generateNewProcessInstance(processDefinition, command, cmdParam);
        }
        if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) {
            logger.error("command parameter check failed!");
            return null;
        }
        if (command.getScheduleTime() != null) {
            processInstance.setScheduleTime(command.getScheduleTime());
        }
        processInstance.setHost(host);
        ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION;
        int runTime = processInstance.getRunTimes();
        switch (commandType) {
            case START_PROCESS:
                break;
            case START_FAILURE_TASK_PROCESS:
                // find failed tasks and init these tasks
                List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE);
                List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE);
                List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
                cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
                failedList.addAll(killedList);
                failedList.addAll(toleranceList);
                for (Integer taskId : failedList) {
                    initTaskInstance(this.findTaskInstanceById(taskId));
                }
                cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING,
                        String.join(Constants.COMMA, convertIntListToString(failedList)));
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case START_CURRENT_TASK_PROCESS: break; case RECOVER_WAITING_THREAD: break; case RECOVER_SUSPENDED_PROCESS: // find pause tasks and init task's state cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE); List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); suspendedNodeList.addAll(stopNodeList); for (Integer taskId : suspendedNodeList) { // initialize the pause state initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList))); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case RECOVER_TOLERANCE_FAULT_PROCESS: // recover tolerance fault process processInstance.setRecovery(Flag.YES); runStatus = processInstance.getState(); break; case COMPLEMENT_DATA: // delete all the valid tasks when complement data List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { taskInstance.setFlag(Flag.NO); this.updateTaskInstance(taskInstance); } initComplementDataParam(processDefinition, processInstance, cmdParam); break; case REPEAT_RUNNING: // delete the recover task names from command parameter if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } // delete all the valid tasks when repeat running List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : validTaskList) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); } processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processInstance.setRunTimes(runTime + 1); initComplementDataParam(processDefinition, processInstance, cmdParam); break; case SCHEDULER: break; default: break; } processInstance.setState(runStatus); return processInstance; } /** * get process definition by command * If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance * Otherwise, get the latest version of ProcessDefinition * * @return ProcessDefinition */ private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) { if (cmdParam != null) { int processInstanceId = 0; if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)); } if (processInstanceId != 0) { ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return null; } return processDefineLogMapper.queryByDefinitionCodeAndVersion( processInstance.getProcessDefinitionCode(), 
                        processInstance.getProcessDefinitionVersion());
            }
        }
        return processDefineMapper.queryByCode(processDefinitionCode);
    }

    /**
     * return the complement data command type if the process started with complement data
     *
     * @param processInstance processInstance
     * @param command command
     * @return command type
     */
    private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) {
        if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) {
            return CommandType.COMPLEMENT_DATA;
        } else {
            return command.getCommandType();
        }
    }

    /**
     * initialize complement data parameters
     *
     * @param processDefinition processDefinition
     * @param processInstance processInstance
     * @param cmdParam cmdParam
     */
    private void initComplementDataParam(ProcessDefinition processDefinition,
                                         ProcessInstance processInstance,
                                         Map<String, String> cmdParam) {
        if (!processInstance.isComplementData()) {
            return;
        }
        Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE),
                YYYY_MM_DD_HH_MM_SS);
        if (Flag.NO == processInstance.getIsSubProcess()) {
            processInstance.setScheduleTime(startComplementTime);
        }
        processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
                processDefinition.getGlobalParamMap(),
                processDefinition.getGlobalParamList(),
                CommandType.COMPLEMENT_DATA,
                processInstance.getScheduleTime()));
    }

    /**
     * set sub work process parameters:
     * handle the sub work process instance, update the relation table and command parameters,
     * set the sub work process flag, extend the parent work process command parameters
     *
     * @param subProcessInstance subProcessInstance
     */
    public void setSubProcessParam(ProcessInstance subProcessInstance) {
        String cmdParam = subProcessInstance.getCommandParam();
        if (StringUtils.isEmpty(cmdParam)) {
            return;
        }
        Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
        // write sub process id into cmd param
        if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS)
                && CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) {
            paramMap.remove(CMD_PARAM_SUB_PROCESS);
            paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
            subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
            subProcessInstance.setIsSubProcess(Flag.YES);
            this.saveProcessInstance(subProcessInstance);
        }
        // copy the parent instance's user-defined params to the sub process
        String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
        if (StringUtils.isNotEmpty(parentInstanceId)) {
            ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
            if (parentInstance != null) {
                subProcessInstance.setGlobalParams(
                        joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
                this.saveProcessInstance(subProcessInstance);
            } else {
                logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
            }
        }
        ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
        if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) {
            return;
        }
        // update sub process id to process map table
        processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
        this.updateWorkProcessInstanceMap(processInstanceMap);
    }

    /**
     * join parent global params into the sub process:
     * only keys that are absent from the sub process global params are joined.
     *
     * @param parentGlobalParams parentGlobalParams
     * @param subGlobalParams subGlobalParams
     * @return global params join
     */
    private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
        List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
        List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
        Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
        for (Property parent : parentPropertyList) {
            if (!subMap.containsKey(parent.getProp())) {
                subPropertyList.add(parent);
            }
        }
        return JSONUtils.toJsonString(subPropertyList);
    }

    /**
     * initialize task instance
     *
     * @param taskInstance taskInstance
     */
    private void initTaskInstance(TaskInstance taskInstance) {
        if (!taskInstance.isSubProcess()
                && (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
            taskInstance.setFlag(Flag.NO);
            updateTaskInstance(taskInstance);
            return;
        }
        taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
        updateTaskInstance(taskInstance);
    }

    /**
     * retry submitting the task to the db
     *
     * @param taskInstance taskInstance
     * @param commitRetryTimes max retry times
     * @param commitInterval retry interval in milliseconds
     * @return task instance
     */
    public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) {
        int retryTimes = 1;
        boolean submitDB = false;
        TaskInstance task = null;
        while (retryTimes <= commitRetryTimes) {
            try {
                if (!submitDB) {
                    // submit task to db
                    task = submitTask(taskInstance);
                    if (task != null && task.getId() != 0) {
                        submitDB = true;
                    }
                }
                if (!submitDB) {
                    logger.error("task commit to db failed, taskId {} has already retried {} times, please check the database",
                            taskInstance.getId(), retryTimes);
                }
                Thread.sleep(commitInterval);
            } catch (Exception e) {
                logger.error("task commit to db failed", e);
            }
            retryTimes += 1;
        }
        return task;
    }

    /**
     * submit task to db;
     * submit sub process to command
     *
     * @param taskInstance taskInstance
     * @return task instance
     */
    @Transactional(rollbackFor = Exception.class)
    public TaskInstance submitTask(TaskInstance taskInstance) {
        ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
        logger.info("start submit task : {}, instance id:{}, state: {}",
                taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
        // submit to db
        TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
        if (task == null) {
            logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
                    taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
            return task;
        }
        if (!task.getState().typeIsFinished()) {
            createSubWorkProcess(processInstance, task);
        }
        logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ",
                taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
        return task;
    }

    /**
     * set work process instance map;
     * a repeat running command does not generate a new sub process instance;
     * set map {parent instance id, task instance id, 0(child instance id)}
     *
     * @param parentInstance parentInstance
     * @param parentTask parentTask
     * @return process instance map
     */
    private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
        ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
        if (processMap != null) {
            return processMap;
        }
        if (parentInstance.getCommandType()
== CommandType.REPEAT_RUNNING) { // update current task id to map processMap = findPreviousTaskProcessMap(parentInstance, parentTask); if (processMap != null) { processMap.setParentTaskInstanceId(parentTask.getId()); updateWorkProcessInstanceMap(processMap); return processMap; } } // new task processMap = new ProcessInstanceMap(); processMap.setParentProcessInstanceId(parentInstance.getId()); processMap.setParentTaskInstanceId(parentTask.getId()); createWorkProcessInstanceMap(processMap); return processMap; } /** * find previous task work process map. * * @param parentProcessInstance parentProcessInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance, TaskInstance parentTask) { Integer preTaskId = 0; List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId()); for (TaskInstance task : preTaskList) { if (task.getName().equals(parentTask.getName())) { preTaskId = task.getId(); ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId); if (map != null) { return map; } } } logger.info("sub process instance is not found,parent task:{},parent instance:{}", parentTask.getId(), parentProcessInstance.getId()); return null; } /** * create sub work process command * * @param parentProcessInstance parentProcessInstance * @param task task */ public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) { if (!task.isSubProcess()) { return; } //check create sub work flow firstly ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId()); if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) { // recover failover tolerance would not create a new command when the sub command already have been created return; } instanceMap = setProcessInstanceMap(parentProcessInstance, task); ProcessInstance childInstance = null; if (instanceMap.getProcessInstanceId() != 0) { childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId()); } Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task); updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode()); initSubInstanceState(childInstance); createCommand(subProcessCommand); logger.info("sub process command created: {} ", subProcessCommand); } /** * complement data needs transform parent parameter to child. 
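 * <p>Sketch of what gets copied into the sub process command param (this mirrors the
 * statements below; the date values come from the parent command param):
 * <pre>{@code
 * cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
 * cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
 * cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams));
 * }</pre>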
*/ private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) { // set sub work process command String processMapStr = JSONUtils.toJsonString(instanceMap); Map<String, String> cmdParam = JSONUtils.toMap(processMapStr); if (parentProcessInstance.isComplementData()) { Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam()); String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE); String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime); processMapStr = JSONUtils.toJsonString(cmdParam); } if (fatherParams.size() != 0) { cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams)); processMapStr = JSONUtils.toJsonString(cmdParam); } return processMapStr; } public Map<String, String> getGlobalParamMap(String globalParams) { List<Property> propList; Map<String, String> globalParamMap = new HashMap<>(); if (StringUtils.isNotEmpty(globalParams)) { propList = JSONUtils.toList(globalParams, Property.class); globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); } return globalParamMap; } /** * create sub work process command */ public Command createSubProcessCommand(ProcessInstance parentProcessInstance, ProcessInstance childInstance, ProcessInstanceMap instanceMap, TaskInstance task) { CommandType commandType = getSubCommandType(parentProcessInstance, childInstance); Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams()); int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID)); ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId); Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS); List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams()); Map<String, String> fatherParams = new HashMap<>(); if (CollectionUtils.isNotEmpty(allParam)) { for (Property info : allParam) { fatherParams.put(info.getProp(), globalMap.get(info.getProp())); } } String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams); return new Command( commandType, TaskDependType.TASK_POST, parentProcessInstance.getFailureStrategy(), parentProcessInstance.getExecutorId(), processDefinition.getCode(), processParam, parentProcessInstance.getWarningType(), parentProcessInstance.getWarningGroupId(), parentProcessInstance.getScheduleTime(), task.getWorkerGroup(), task.getEnvironmentCode(), parentProcessInstance.getProcessInstancePriority() ); } /** * initialize sub work flow state * child instance state would be initialized when 'recovery from pause/stop/failure' */ private void initSubInstanceState(ProcessInstance childInstance) { if (childInstance != null) { childInstance.setState(ExecutionStatus.RUNNING_EXECUTION); updateProcessInstance(childInstance); } } /** * get sub work flow command type * child instance exist: child command = fatherCommand * child instance not exists: child command = fatherCommand[0] */ private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) { CommandType commandType = parentProcessInstance.getCommandType(); if (childInstance == null) { String fatherHistoryCommand = 
parentProcessInstance.getHistoryCmd();
            commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
        }
        return commandType;
    }

    /**
     * update sub process definition
     *
     * @param parentProcessInstance parentProcessInstance
     * @param childDefinitionCode childDefinitionCode
     */
    private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) {
        ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(),
                parentProcessInstance.getProcessDefinitionVersion());
        ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode);
        if (childDefinition != null && fatherDefinition != null) {
            childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
            processDefineMapper.updateById(childDefinition);
        }
    }

    /**
     * submit task to db
     *
     * @param taskInstance taskInstance
     * @param processInstance processInstance
     * @return task instance
     */
    public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
        ExecutionStatus processInstanceState = processInstance.getState();
        if (taskInstance.getState().typeIsFailure()) {
            if (taskInstance.isSubProcess()) {
                taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
            } else {
                if (processInstanceState != ExecutionStatus.READY_STOP
                        && processInstanceState != ExecutionStatus.READY_PAUSE) {
                    // failure task set invalid
                    taskInstance.setFlag(Flag.NO);
                    updateTaskInstance(taskInstance);
                    // create new task instance
                    if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
                        taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
                    }
                    taskInstance.setSubmitTime(null);
                    taskInstance.setStartTime(null);
                    taskInstance.setEndTime(null);
                    taskInstance.setFlag(Flag.YES);
                    taskInstance.setHost(null);
                    taskInstance.setId(0);
                }
            }
        }
        taskInstance.setExecutorId(processInstance.getExecutorId());
        taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
        taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
        if (taskInstance.getSubmitTime() == null) {
            taskInstance.setSubmitTime(new Date());
        }
        if (taskInstance.getFirstSubmitTime() == null) {
            taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
        }
        boolean saveResult = saveTaskInstance(taskInstance);
        if (!saveResult) {
            return null;
        }
        return taskInstance;
    }

    /**
     * get the submit task instance state by the work process state.
     * the task state cannot be modified when it is running/killed/submitted successfully,
     * because such a task instance already exists in the task queue;
     * return pause if the work process state is ready pause;
     * return stop if the work process state is ready stop;
     * if none of the above is satisfied, return submit success
     *
     * @param taskInstance taskInstance
     * @param processInstanceState processInstanceState
     * @return process instance state
     */
    public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
        ExecutionStatus state = taskInstance.getState();
        // running, delayed or killed:
        // the task already exists in the task queue, so return the state as-is
        if (
                state == ExecutionStatus.RUNNING_EXECUTION
                        || state == ExecutionStatus.DELAY_EXECUTION
                        || state == ExecutionStatus.KILL
        ) {
            return state;
        }
        // return pause/stop if the process instance state is ready pause/stop,
        // otherwise return submit success
        if (processInstanceState == ExecutionStatus.READY_PAUSE) {
            state = ExecutionStatus.PAUSE;
        } else if (processInstanceState == ExecutionStatus.READY_STOP
                || !checkProcessStrategy(taskInstance)) {
            state = ExecutionStatus.KILL;
        } else {
            state = ExecutionStatus.SUBMITTED_SUCCESS;
        }
        return state;
    }

    /**
     * check process instance strategy
     *
     * @param taskInstance taskInstance
     * @return check strategy result
     */
    private boolean checkProcessStrategy(TaskInstance taskInstance) {
        ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
        FailureStrategy failureStrategy = processInstance.getFailureStrategy();
        if (failureStrategy == FailureStrategy.CONTINUE) {
            return true;
        }
        List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
        for (TaskInstance task : taskInstances) {
            if (task.getState() == ExecutionStatus.FAILURE
                    && task.getRetryTimes() >= task.getMaxRetryTimes()) {
                return false;
            }
        }
        return true;
    }

    /**
     * insert or update work process instance into the database
     *
     * @param processInstance processInstance
     */
    public void saveProcessInstance(ProcessInstance processInstance) {
        if (processInstance == null) {
            logger.error("save error, process instance is null!");
            return;
        }
        if (processInstance.getId() != 0) {
            processInstanceMapper.updateById(processInstance);
        } else {
            processInstanceMapper.insert(processInstance);
        }
    }

    /**
     * insert or update command
     *
     * @param command command
     * @return save command result
     */
    public int saveCommand(Command command) {
        if (command.getId() != 0) {
            return commandMapper.updateById(command);
        } else {
            return commandMapper.insert(command);
        }
    }

    /**
     * insert or update task instance
     *
     * @param taskInstance taskInstance
     * @return save task instance result
     */
    public boolean saveTaskInstance(TaskInstance taskInstance) {
        if (taskInstance.getId() != 0) {
            return updateTaskInstance(taskInstance);
        } else {
            return createTaskInstance(taskInstance);
        }
    }

    /**
     * insert task instance
     *
     * @param taskInstance taskInstance
     * @return create task instance result
     */
    public boolean createTaskInstance(TaskInstance taskInstance) {
        int count = taskInstanceMapper.insert(taskInstance);
        return count > 0;
    }

    /**
     * update task instance
     *
     * @param taskInstance taskInstance
     * @return update task instance result
     */
    public boolean updateTaskInstance(TaskInstance taskInstance) {
        int count = taskInstanceMapper.updateById(taskInstance);
        return count > 0;
    }

    /**
     * find task instance by id
     *
     * @param taskId task id
     * @return task instance
     */
    public TaskInstance findTaskInstanceById(Integer taskId) {
        return taskInstanceMapper.selectById(taskId);
    }

    /**
     * package the task instance: associate the processInstance and processDefine
     *
     * @param taskInstId taskInstId
     * @return task instance
     */
    public
TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) { // get task instance TaskInstance taskInstance = findTaskInstanceById(taskInstId); if (taskInstance == null) { return null; } // get process instance ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId()); // get process define ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); taskInstance.setProcessInstance(processInstance); taskInstance.setProcessDefine(processDefine); TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); taskInstance.setTaskDefine(taskDefinition); return taskInstance; } /** * get id list by task state * * @param instanceId instanceId * @param state state * @return task instance states */ public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) { return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal()); } /** * find valid task list by process definition id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES); } /** * find previous task list by work process id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO); } /** * update work process instance map * * @param processInstanceMap processInstanceMap * @return update process instance result */ public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { return processInstanceMapMapper.updateById(processInstanceMap); } /** * create work process instance map * * @param processInstanceMap processInstanceMap * @return create process instance result */ public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { int count = 0; if (processInstanceMap != null) { return processInstanceMapMapper.insert(processInstanceMap); } return count; } /** * find work process map by parent process id and parent task id. 
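 * <p>Returns null when no sub process has been registered for the given parent task yet;
 * callers such as {@code setProcessInstanceMap} rely on this to decide whether a new
 * map entry must be created.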
* * @param parentWorkProcessId parentWorkProcessId * @param parentTaskId parentTaskId * @return process instance map */ public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) { return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId); } /** * delete work process map by parent process id * * @param parentWorkProcessId parentWorkProcessId * @return delete process map result */ public int deleteWorkProcessMapByParentId(int parentWorkProcessId) { return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId); } /** * find sub process instance * * @param parentProcessId parentProcessId * @param parentTaskId parentTaskId * @return process instance */ public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId()); return processInstance; } /** * find parent process instance * * @param subProcessId subProcessId * @return process instance */ public ProcessInstance findParentProcessInstance(Integer subProcessId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); return processInstance; } /** * change task state * * @param state state * @param startTime startTime * @param host host * @param executePath executePath * @param logPath logPath * @param taskInstId taskInstId */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host, String executePath, String logPath, int taskInstId) { taskInstance.setState(state); taskInstance.setStartTime(startTime); taskInstance.setHost(host); taskInstance.setExecutePath(executePath); taskInstance.setLogPath(logPath); saveTaskInstance(taskInstance); } /** * update process instance * * @param processInstance processInstance * @return update process instance result */ public int updateProcessInstance(ProcessInstance processInstance) { return processInstanceMapper.updateById(processInstance); } /** * change task state * * @param state state * @param endTime endTime * @param taskInstId taskInstId * @param varPool varPool */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date endTime, int processId, String appIds, int taskInstId, String varPool) { taskInstance.setPid(processId); taskInstance.setAppLink(appIds); taskInstance.setState(state); taskInstance.setEndTime(endTime); taskInstance.setVarPool(varPool); changeOutParam(taskInstance); saveTaskInstance(taskInstance); } /** * for show in page of taskInstance * * @param taskInstance */ public void changeOutParam(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getVarPool())) { return; } List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class); if (CollectionUtils.isEmpty(properties)) { return; } //if the result more than one line,just get the first . 
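        // varPool holds Property entries serialized as JSON, e.g. (illustrative value):
        // [{"prop":"result","direct":"OUT","type":"VARCHAR","value":"123"}]
        // below, OUT properties from varPool overwrite the matching OUT entries in localParams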
Map<String, Object> taskParams = JSONUtils.toMap(taskInstance.getTaskParams(), String.class, Object.class); Object localParams = taskParams.get(LOCAL_PARAMS); if (localParams == null) { return; } List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> outProperty = new HashMap<>(); for (Property info : properties) { if (info.getDirect() == Direct.OUT) { outProperty.put(info.getProp(), info.getValue()); } } for (Property info : allParam) { if (info.getDirect() == Direct.OUT) { String paramName = info.getProp(); info.setValue(outProperty.get(paramName)); } } taskParams.put(LOCAL_PARAMS, allParam); taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams)); } /** * convert integer list to string list * * @param intList intList * @return string list */ public List<String> convertIntListToString(List<Integer> intList) { if (intList == null) { return new ArrayList<>(); } List<String> result = new ArrayList<>(intList.size()); for (Integer intVar : intList) { result.add(String.valueOf(intVar)); } return result; } /** * query schedule by id * * @param id id * @return schedule */ public Schedule querySchedule(int id) { return scheduleMapper.selectById(id); } /** * query Schedule by processDefinitionCode * * @param processDefinitionCode processDefinitionCode * @see Schedule */ public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) { return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode); } /** * query need failover process instance * * @param host host * @return process instance list */ public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) { return processInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * process need failover process instance * * @param processInstance processInstance */ @Transactional(rollbackFor = RuntimeException.class) public void processNeedFailoverProcessInstances(ProcessInstance processInstance) { //1 update processInstance host is null processInstance.setHost(Constants.NULL); processInstanceMapper.updateById(processInstance); ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); //2 insert into recover command Command cmd = new Command(); cmd.setProcessDefinitionCode(processDefinition.getCode()); cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId())); cmd.setExecutorId(processInstance.getExecutorId()); cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS); createCommand(cmd); } /** * query all need failover task instances by host * * @param host host * @return task instance list */ public List<TaskInstance> queryNeedFailoverTaskInstances(String host) { return taskInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * find data source by id * * @param id id * @return datasource */ public DataSource findDataSourceById(int id) { return dataSourceMapper.selectById(id); } /** * update process instance state by id * * @param processInstanceId processInstanceId * @param executionStatus executionStatus * @return update process result */ public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) { ProcessInstance instance = processInstanceMapper.selectById(processInstanceId); instance.setState(executionStatus); return processInstanceMapper.updateById(instance); } /** * find process 
instance by the task id * * @param taskId taskId * @return process instance */ public ProcessInstance findProcessInstanceByTaskId(int taskId) { TaskInstance taskInstance = taskInstanceMapper.selectById(taskId); if (taskInstance != null) { return processInstanceMapper.selectById(taskInstance.getProcessInstanceId()); } return null; } /** * find udf function list by id list string * * @param ids ids * @return udf function list */ public List<UdfFunc> queryUdfFunListByIds(int[] ids) { return udfFuncMapper.queryUdfByIdStr(ids, null); } /** * find tenant code by resource name * * @param resName resource name * @param resourceType resource type * @return tenant code */ public String queryTenantCodeByResName(String resName, ResourceType resourceType) { // in order to query tenant code successful although the version is older String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName); List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { return StringUtils.EMPTY; } int userId = resourceList.get(0).getUserId(); User user = userMapper.selectById(userId); if (Objects.isNull(user)) { return StringUtils.EMPTY; } Tenant tenant = tenantMapper.selectById(user.getTenantId()); if (Objects.isNull(tenant)) { return StringUtils.EMPTY; } return tenant.getTenantCode(); } /** * find schedule list by process define codes. * * @param codes codes * @return schedule list */ public List<Schedule> selectAllByProcessDefineCode(long[] codes) { return scheduleMapper.selectAllByProcessDefineArray(codes); } /** * find last scheduler process instance in the date interval * * @param definitionCode definitionCode * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastSchedulerProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last manual process instance interval * * @param definitionCode process definition code * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastManualProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last running process instance * * @param definitionCode process definition code * @param startTime start time * @param endTime end time * @return process instance */ public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) { return processInstanceMapper.queryLastRunningProcess(definitionCode, startTime, endTime, stateArray); } /** * query user queue by process instance id * * @param processInstanceId processInstanceId * @return queue */ public String queryUserQueueByProcessInstanceId(int processInstanceId) { String queue = ""; ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId); if (processInstance == null) { return queue; } User executor = userMapper.selectById(processInstance.getExecutorId()); if (executor != null) { queue = executor.getQueue(); } return queue; } /** * query project name and user name by processInstanceId. 
* * @param processInstanceId processInstanceId * @return projectName and userName */ public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) { return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId); } /** * get task worker group * * @param taskInstance taskInstance * @return workerGroupId */ public String getTaskWorkerGroup(TaskInstance taskInstance) { String workerGroup = taskInstance.getWorkerGroup(); if (StringUtils.isNotBlank(workerGroup)) { return workerGroup; } int processInstanceId = taskInstance.getProcessInstanceId(); ProcessInstance processInstance = findProcessInstanceById(processInstanceId); if (processInstance != null) { return processInstance.getWorkerGroup(); } logger.info("task : {} will use default worker group", taskInstance.getId()); return Constants.DEFAULT_WORKER_GROUP; } /** * get have perm project list * * @param userId userId * @return project list */ public List<Project> getProjectListHavePerm(int userId) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId); List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId); if (createProjects == null) { createProjects = new ArrayList<>(); } if (authedProjects != null) { createProjects.addAll(authedProjects); } return createProjects; } /** * list unauthorized udf function * * @param userId user id * @param needChecks data source id array * @return unauthorized udf function list */ public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) { List<T> resultList = new ArrayList<>(); if (Objects.nonNull(needChecks) && needChecks.length > 0) { Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks)); switch (authorizationType) { case RESOURCE_FILE_ID: case UDF_FILE: List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks); addAuthorizedResources(ownUdfResources, userId); Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet()); originResSet.removeAll(authorizedResourceFiles); break; case RESOURCE_FILE_NAME: List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks); addAuthorizedResources(ownResources, userId); Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet()); originResSet.removeAll(authorizedResources); break; case DATASOURCE: Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet()); originResSet.removeAll(authorizedDatasources); break; case UDF: Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet()); originResSet.removeAll(authorizedUdfs); break; default: break; } resultList.addAll(originResSet); } return resultList; } /** * get user by user id * * @param userId user id * @return User */ public User getUserById(int userId) { return userMapper.selectById(userId); } /** * get resource by resource id * * @param resourceId resource id * @return Resource */ public Resource getResourceById(int resourceId) { return resourceMapper.selectById(resourceId); } /** * list resources by ids * * @param resIds resIds * @return resource list */ public List<Resource> listResourceByIds(Integer[] resIds) { return resourceMapper.listResourceByIds(resIds); } /** * format task app id in task instance */ public String formatTaskAppId(TaskInstance taskInstance) { 
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId()); if (processInstance == null) { return ""; } ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (definition == null) { return ""; } return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId()); } /** * switch process definition version to process definition log version */ public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) { if (null == processDefinition || null == processDefinitionLog) { return Constants.DEFINITION_FAILURE; } processDefinitionLog.setId(processDefinition.getId()); processDefinitionLog.setReleaseState(ReleaseState.OFFLINE); processDefinitionLog.setFlag(Flag.YES); int result = processDefineMapper.updateById(processDefinitionLog); if (result > 0) { result = switchProcessTaskRelationVersion(processDefinition); if (result <= 0) { return Constants.DEFINITION_FAILURE; } } return result; } public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode()); } List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); return processTaskRelationMapper.batchInsert(processTaskRelationLogList); } /** * get resource ids * * @param taskDefinition taskDefinition * @return resource ids */ public String getResourceIds(TaskDefinition taskDefinition) { Set<Integer> resourceIds = null; AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams()); if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) { resourceIds = params.getResourceFilesList(). 
stream() .filter(t -> t.getId() != 0) .map(ResourceInfo::getId) .collect(Collectors.toSet()); } if (CollectionUtils.isEmpty(resourceIds)) { return StringUtils.EMPTY; } return StringUtils.join(resourceIds, ","); } public boolean saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) { Date now = new Date(); List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>(); List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperateTime(now); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog)); if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) { TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper .queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion()); if (definitionCodeAndVersion != null) { if (!taskDefinitionLog.equals(definitionCodeAndVersion)) { taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId()); Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode()); taskDefinitionLog.setVersion(version + 1); taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime()); updateTaskDefinitionLogs.add(taskDefinitionLog); } continue; } } taskDefinitionLog.setUserId(operator.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); if (taskDefinitionLog.getCode() == 0) { try { taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); return false; } } newTaskDefinitionLogs.add(taskDefinitionLog); } for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) { TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode()); if (task == null) { newTaskDefinitionLogs.add(taskDefinitionToUpdate); } else { int insert = taskDefinitionLogMapper.insert(taskDefinitionToUpdate); taskDefinitionToUpdate.setId(task.getId()); int update = taskDefinitionMapper.updateById(taskDefinitionToUpdate); if ((update & insert) != 1) { return false; } } } if (!newTaskDefinitionLogs.isEmpty()) { int insert = taskDefinitionMapper.batchInsert(newTaskDefinitionLogs); int logInsert = taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs); return (logInsert & insert) != 0; } return true; } /** * save processDefinition (including create or update processDefinition) */ public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) { ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1; processDefinitionLog.setVersion(insertVersion); processDefinitionLog.setReleaseState(isFromProcessDefine ? 
ReleaseState.OFFLINE : ReleaseState.ONLINE); processDefinitionLog.setOperator(operator.getId()); processDefinitionLog.setOperateTime(processDefinition.getUpdateTime()); int insertLog = processDefineLogMapper.insert(processDefinitionLog); int result; if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { processDefinitionLog.setId(processDefinition.getId()); result = processDefineMapper.updateById(processDefinitionLog); } return (insertLog & result) > 0 ? insertVersion : 0; } /** * save task relations */ public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion, List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null; if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) { taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog)); } Date now = new Date(); for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { processTaskRelationLog.setProjectCode(projectCode); processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode); processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion); if (taskDefinitionLogMap != null) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode()); if (taskDefinitionLog != null) { processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion()); } processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion()); } processTaskRelationLog.setCreateTime(now); processTaskRelationLog.setUpdateTime(now); processTaskRelationLog.setOperator(operator.getId()); processTaskRelationLog.setOperateTime(now); } List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet()); Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet()); if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) { return Constants.EXIT_CODE_SUCCESS; } processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode); } int result = processTaskRelationMapper.batchInsert(taskRelationList); int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList); return (result & resultLog) > 0 ? 
Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE; } public boolean isTaskOnline(long taskCode) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode); if (!processTaskRelationList.isEmpty()) { Set<Long> processDefinitionCodes = processTaskRelationList .stream() .map(ProcessTaskRelation::getProcessDefinitionCode) .collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes); // check process definition is already online for (ProcessDefinition processDefinition : processDefinitionList) { if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { return true; } } } return false; } /** * Generate the DAG Graph based on the process definition id * * @param processDefinition process definition * @return dag graph */ public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList()); ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations)); // Generate concrete Dag to be executed return DagHelper.buildDagGraph(processDag); } /** * generate DagData */ public DagData genDagData(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations); List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream() .map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class)) .collect(Collectors.toList()); return new DagData(processDefinition, processTaskRelations, taskDefinitions); } public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) { Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion())); } if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); } /** * find task definition by code and version */ public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) { return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion); } /** * find process task relation list by projectCode and processDefinitionCode */ public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) { return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); } /** * add authorized resources * * @param ownResources own resources * @param userId userId */ private void addAuthorizedResources(List<Resource> ownResources, int userId) { List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7); List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? 
resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>(); ownResources.addAll(relationResources); } /** * Use temporarily before refactoring taskNode */ public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, List<Long>> taskCodeMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> { if (v == null) { v = new ArrayList<>(); } if (processTaskRelation.getPreTaskCode() != 0L) { v.add(processTaskRelation.getPreTaskCode()); } return v; }); } if (CollectionUtils.isEmpty(taskDefinitionLogs)) { taskDefinitionLogs = genTaskDefineList(taskRelationList); } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); List<TaskNode> taskNodeList = new ArrayList<>(); for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey()); if (taskDefinitionLog != null) { TaskNode taskNode = new TaskNode(); taskNode.setCode(taskDefinitionLog.getCode()); taskNode.setVersion(taskDefinitionLog.getVersion()); taskNode.setName(taskDefinitionLog.getName()); taskNode.setDesc(taskDefinitionLog.getDescription()); taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase()); taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN); taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes()); taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval()); Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams()); taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT))); taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT))); taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE))); taskParamsMap.remove(Constants.CONDITION_RESULT); taskParamsMap.remove(Constants.DEPENDENCE); taskNode.setParams(JSONUtils.toJsonString(taskParamsMap)); taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority()); taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup()); taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode()); taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN, taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout()))); taskNode.setDelayTime(taskDefinitionLog.getDelayTime()); taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getName).collect(Collectors.toList()))); taskNodeList.add(taskNode); } } return taskNodeList; } public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) { HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>(); //find sub tasks ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId); if (processInstanceMap == null) { return processTaskMap; } ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId()); if (fatherProcess != null) { 
processTaskMap.put(fatherProcess, fatherTask); } return processTaskMap; } }
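The tail of this record's file_content shows the write-back of OUT-direction parameters: values produced by a finished task instance overwrite the matching `Direct.OUT` entries in the `localParams` stored inside the task's params JSON. Below is a minimal, self-contained sketch of that merge; all class, field, and method names are illustrative stand-ins, not the project's API:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class OutParamMergeSketch {

    enum Direct { IN, OUT }

    static class Property {
        final String prop;
        final Direct direct;
        String value;

        Property(String prop, Direct direct, String value) {
            this.prop = prop;
            this.direct = direct;
            this.value = value;
        }
    }

    // Overwrite each OUT-direction param in the stored definition with the
    // value the finished task actually produced (null if it produced none),
    // leaving IN-direction params untouched.
    static void mergeOutParams(List<Property> localParams, List<Property> runtimeResults) {
        Map<String, String> outValues = new HashMap<>();
        for (Property p : runtimeResults) {
            if (p.direct == Direct.OUT) {
                outValues.put(p.prop, p.value);
            }
        }
        for (Property p : localParams) {
            if (p.direct == Direct.OUT) {
                p.value = outValues.get(p.prop);
            }
        }
    }

    public static void main(String[] args) {
        List<Property> localParams = new ArrayList<>(Arrays.asList(
                new Property("rowCount", Direct.OUT, ""),
                new Property("inputDate", Direct.IN, "2021-01-06")));
        mergeOutParams(localParams, Arrays.asList(new Property("rowCount", Direct.OUT, "42")));
        for (Property p : localParams) {
            System.out.println(p.prop + "=" + p.value); // rowCount=42, inputDate=2021-01-06
        }
    }
}
```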
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,256
[Bug] [ALL] No install.sh for deploy
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened There seems to be no install.sh for deployment in the dev branch. ### What you expected to happen install.sh should be included. ### How to reproduce No special steps needed; install.sh cannot be found after mvn install. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6256
https://github.com/apache/dolphinscheduler/pull/6257
649723132bc8276ef221c37e7014c0cf6772be60
7098855ee0f004b2d291a33f2740ca36edd64dbf
"2021-09-17T15:17:46Z"
java
"2021-09-20T16:53:25Z"
dolphinscheduler-server/src/main/resources/config/install_config.conf
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # NOTICE: If the following config has special characters in the variable `.*[]^${}\+?|()@#&`, Please escape, for example, `[` escape to `\[` # postgresql or mysql dbtype="mysql" # db config # db address and port dbhost="192.168.xx.xx:3306" # db username username="xx" # db password # NOTICE: if there are special characters, please use the \ to escape, for example, `[` escape to `\[` password="xx" # database name dbname="dolphinscheduler" # zk cluster zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181" # zk root directory zkRoot="/dolphinscheduler" # Note: the target installation path for dolphinscheduler, please not config as the same as the current path (pwd) installPath="/data1_1T/dolphinscheduler" # deployment user # Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself deployUser="dolphinscheduler" # alert config # alert plugin dir # Note: find and load the Alert Plugin Jar from this dir. alertPluginDir="/data1_1T/dolphinscheduler/lib/plugin/alert" # user data local directory path, please make sure the directory exists and have read write permissions dataBasedirPath="/tmp/dolphinscheduler" # resource storage type: HDFS, S3, NONE resourceStorageType="NONE" # resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended resourceUploadPath="/dolphinscheduler" # if resourceStorageType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory. 
# if S3,write S3 address,HA,for example :s3a://dolphinscheduler, # Note,s3 be sure to create the root directory /dolphinscheduler defaultFS="hdfs://mycluster:8020" # if resourceStorageType is S3, the following three configuration is required, otherwise please ignore s3Endpoint="http://192.168.xx.xx:9010" s3AccessKey="xxxxxxxxxx" s3SecretKey="xxxxxxxxxx" # resourcemanager port, the default value is 8088 if not specified resourceManagerHttpAddressPort="8088" # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty yarnHaIps="192.168.xx.xx,192.168.xx.xx" # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname singleYarnIp="yarnIp1" # who have permissions to create directory under HDFS/S3 root path # Note: if kerberos is enabled, please config hdfsRootUser= hdfsRootUser="hdfs" # kerberos config # whether kerberos starts, if kerberos starts, following four items need to config, otherwise please ignore kerberosStartUp="false" # kdc krb5 config file path krb5ConfPath="$installPath/conf/krb5.conf" # keytab username keytabUserName="hdfs-mycluster@ESZ.COM" # username keytab path keytabPath="$installPath/conf/hdfs.headless.keytab" # kerberos expire time, the unit is hour kerberosExpireTime="2" # use sudo or not sudoEnable="true" # worker tenant auto create workerTenantAutoCreate="false" # api server port apiServerPort="12345" # install hosts # Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname ips="ds1,ds2,ds3,ds4,ds5" # ssh port, default 22 # Note: if ssh port is not default, modify here sshPort="22" # run master machine # Note: list of hosts hostname for deploying master masters="ds1,ds2" # run worker machine # note: need to write the worker group name of each worker, the default value is "default" workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default" # run alert machine # note: list of machine hostnames for deploying alert server alertServer="ds3" # run api machine # note: list of machine hostnames for deploying api server apiServers="ds1"
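The `workers` entry in the config above encodes one `host:group` pair per worker, comma-separated, with "default" as the implied group when none is given. A small hypothetical Java helper to illustrate that format (the install scripts presumably do this parsing in shell; the helper and class names here are made up):

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class WorkerGroupParseSketch {

    // Parse "host1:group1,host2:group2" into host -> worker-group,
    // falling back to "default" when an entry has no explicit group.
    static Map<String, String> parseWorkers(String workers) {
        Map<String, String> hostToGroup = new LinkedHashMap<>();
        for (String entry : workers.split(",")) {
            String[] parts = entry.trim().split(":", 2);
            hostToGroup.put(parts[0], parts.length == 2 ? parts[1] : "default");
        }
        return hostToGroup;
    }

    public static void main(String[] args) {
        System.out.println(parseWorkers("ds1:default,ds2:default,ds3"));
        // {ds1=default, ds2=default, ds3=default}
    }
}
```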
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,256
[Bug] [ALL] No install.sh for deploy
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened There seems to be no install.sh for deployment in the dev branch. ### What you expected to happen install.sh should be included. ### How to reproduce No special steps needed; install.sh cannot be found after mvn install. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6256
https://github.com/apache/dolphinscheduler/pull/6257
649723132bc8276ef221c37e7014c0cf6772be60
7098855ee0f004b2d291a33f2740ca36edd64dbf
"2021-09-17T15:17:46Z"
java
"2021-09-20T16:53:25Z"
install.sh
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,279
[Bug] [Master] Parallel tasks would be irregularly submitted twice
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened With parallel tasks in one DAG, running the DAG causes some tasks to be irregularly submitted twice. ### What you expected to happen Each task in a DAG should be submitted exactly once. ### How to reproduce Put 10 parallel tasks in one DAG, run the DAG, then check the task submission history; you will find some tasks were submitted twice. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
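The race reported here is two concurrent resolutions of the same downstream task each enqueueing it for execution. A minimal sketch of an add-if-absent guard that would make submission idempotent follows; it is illustrative only and is not necessarily the change made in PR #6280, and all names are hypothetical:

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class IdempotentSubmitSketch {

    // Task codes that have already been handed to the submit queue.
    private final Set<Long> submittedTaskCodes = ConcurrentHashMap.newKeySet();

    // Returns true only for the first caller that submits a given task code;
    // Set.add is atomic on the concurrent key set, so no explicit lock is needed.
    public boolean trySubmit(long taskCode) {
        if (!submittedTaskCodes.add(taskCode)) {
            return false; // another thread/event already submitted this task
        }
        // ... enqueue the task instance here ...
        return true;
    }

    public static void main(String[] args) {
        IdempotentSubmitSketch guard = new IdempotentSubmitSketch();
        System.out.println(guard.trySubmit(1001L)); // true  -> submitted
        System.out.println(guard.trySubmit(1001L)); // false -> duplicate suppressed
    }
}
```

The first-caller-wins semantics of the atomic add is what suppresses the duplicate: even if two state events resolve the same post node simultaneously, only one of them ever reaches the enqueue step.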
https://github.com/apache/dolphinscheduler/issues/6279
https://github.com/apache/dolphinscheduler/pull/6280
7098855ee0f004b2d291a33f2740ca36edd64dbf
b71967b017f0d4c50f5837a3173ba0f35bf2c077
"2021-09-20T04:24:39Z"
java
"2021-09-21T04:04:31Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.runner; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES; import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP; import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.DependResult; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.StateEvent; import org.apache.dolphinscheduler.common.enums.StateEventType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.command.HostUpdateCommand; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import 
org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor; import org.apache.dolphinscheduler.server.master.runner.task.TaskAction; import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory; import org.apache.dolphinscheduler.service.alert.ProcessAlertManager; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.quartz.cron.CronUtils; import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.HashBasedTable; import com.google.common.collect.Lists; import com.google.common.collect.Table; /** * master exec thread,split dag */ public class WorkflowExecuteThread implements Runnable { /** * logger of WorkflowExecuteThread */ private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class); /** * runing TaskNode */ private final Map<Integer, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>(); /** * task exec service */ private final ExecutorService taskExecService; /** * process instance */ private ProcessInstance processInstance; /** * submit failure nodes */ private boolean taskFailedSubmit = false; /** * recover node id list */ private List<TaskInstance> recoverNodeIdList = new ArrayList<>(); /** * error task list */ private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>(); /** * complete task list */ private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>(); /** * ready to submit task queue */ private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue(); /** * depend failed task map */ private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>(); /** * forbidden task map */ private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>(); /** * skip task map */ private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>(); /** * recover tolerance fault task list */ private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>(); /** * alert manager */ private ProcessAlertManager processAlertManager; /** * the object of DAG */ private DAG<String, TaskNode, TaskNodeRelation> dag; /** * process service */ private ProcessService processService; /** * master config */ private MasterConfig masterConfig; /** * */ private NettyExecutorManager nettyExecutorManager; private ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>(); private List<Date> complementListDate = Lists.newLinkedList(); private Table<Integer, Long, TaskInstance> taskInstanceHashMap = HashBasedTable.create(); private ProcessDefinition processDefinition; private String key; private ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList; /** * constructor of WorkflowExecuteThread * * @param processInstance processInstance * @param processService processService * @param nettyExecutorManager nettyExecutorManager * 
@param taskTimeoutCheckList */ public WorkflowExecuteThread(ProcessInstance processInstance , ProcessService processService , NettyExecutorManager nettyExecutorManager , ProcessAlertManager processAlertManager , MasterConfig masterConfig , ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList) { this.processService = processService; this.processInstance = processInstance; this.masterConfig = masterConfig; int masterTaskExecNum = masterConfig.getMasterExecTaskNum(); this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread", masterTaskExecNum); this.nettyExecutorManager = nettyExecutorManager; this.processAlertManager = processAlertManager; this.taskTimeoutCheckList = taskTimeoutCheckList; } @Override public void run() { try { startProcess(); handleEvents(); } catch (Exception e) { logger.error("handler error:", e); } } private void handleEvents() { while (this.stateEvents.size() > 0) { try { StateEvent stateEvent = this.stateEvents.peek(); if (stateEventHandler(stateEvent)) { this.stateEvents.remove(stateEvent); } } catch (Exception e) { logger.error("state handle error:", e); } } } public String getKey() { if (StringUtils.isNotEmpty(key) || this.processDefinition == null) { return key; } key = String.format("{}_{}_{}", this.processDefinition.getCode(), this.processDefinition.getVersion(), this.processInstance.getId()); return key; } public boolean addStateEvent(StateEvent stateEvent) { if (processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.info("state event would be abounded :{}", stateEvent.toString()); return false; } this.stateEvents.add(stateEvent); return true; } public int eventSize() { return this.stateEvents.size(); } public ProcessInstance getProcessInstance() { return this.processInstance; } private boolean stateEventHandler(StateEvent stateEvent) { logger.info("process event: {}", stateEvent.toString()); if (!checkStateEvent(stateEvent)) { return false; } boolean result = false; switch (stateEvent.getType()) { case PROCESS_STATE_CHANGE: result = processStateChangeHandler(stateEvent); break; case TASK_STATE_CHANGE: result = taskStateChangeHandler(stateEvent); break; case PROCESS_TIMEOUT: result = processTimeout(); break; case TASK_TIMEOUT: result = taskTimeout(stateEvent); break; default: break; } if (result) { this.stateEvents.remove(stateEvent); } return result; } private boolean taskTimeout(StateEvent stateEvent) { if (taskInstanceHashMap.containsRow(stateEvent.getTaskInstanceId())) { return true; } TaskInstance taskInstance = taskInstanceHashMap .row(stateEvent.getTaskInstanceId()) .values() .iterator().next(); if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) { return true; } TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy(); if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) { ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); taskProcessor.action(TaskAction.TIMEOUT); return false; } else { processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine()); return true; } } private boolean processTimeout() { this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition); return true; } private boolean taskStateChangeHandler(StateEvent stateEvent) { TaskInstance task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); if (stateEvent.getExecutionStatus().typeIsFinished()) { taskFinished(task); } else if 
(activeTaskProcessorMaps.containsKey(stateEvent.getTaskInstanceId())) { ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); iTaskProcessor.run(); if (iTaskProcessor.taskState().typeIsFinished()) { task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); taskFinished(task); } } else { logger.error("state handler error: {}", stateEvent.toString()); } return true; } private void taskFinished(TaskInstance task) { logger.info("work flow {} task {} state:{} ", processInstance.getId(), task.getId(), task.getState()); if (task.taskCanRetry()) { addTaskToStandByList(task); return; } ProcessInstance processInstance = processService.findProcessInstanceById(this.processInstance.getId()); completeTaskList.put(task.getName(), task); activeTaskProcessorMaps.remove(task.getId()); taskTimeoutCheckList.remove(task.getId()); if (task.getState().typeIsSuccess()) { processInstance.setVarPool(task.getVarPool()); processService.saveProcessInstance(processInstance); submitPostNode(task.getName()); } else if (task.getState().typeIsFailure()) { if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) { submitPostNode(task.getName()); } else { errorTaskList.put(task.getName(), task); if (processInstance.getFailureStrategy() == FailureStrategy.END) { killAllTasks(); } } } this.updateProcessInstanceState(); } private boolean checkStateEvent(StateEvent stateEvent) { if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.error("mismatch process instance id: {}, state event:{}", this.processInstance.getId(), stateEvent.toString()); return false; } return true; } private boolean processStateChangeHandler(StateEvent stateEvent) { try { logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus()); processInstance = processService.findProcessInstanceById(this.processInstance.getId()); if (processComplementData()) { return true; } if (stateEvent.getExecutionStatus().typeIsFinished()) { endProcess(); } if (stateEvent.getExecutionStatus() == ExecutionStatus.READY_STOP) { killAllTasks(); } return true; } catch (Exception e) { logger.error("process state change error:", e); } return true; } private boolean processComplementData() throws Exception { if (!needComplementProcess()) { return false; } Date scheduleDate = processInstance.getScheduleTime(); if (scheduleDate == null) { scheduleDate = complementListDate.get(0); } else if (processInstance.getState().typeIsFinished()) { endProcess(); int index = complementListDate.indexOf(scheduleDate); if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) { // complement data ends || no success return false; } scheduleDate = complementListDate.get(index + 1); //the next process complement processInstance.setId(0); } processInstance.setScheduleTime(scheduleDate); Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processInstance.setStartTime(new Date()); 
processInstance.setEndTime(null); processService.saveProcessInstance(processInstance); this.taskInstanceHashMap.clear(); startProcess(); return true; } private boolean needComplementProcess() { if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) { return true; } return false; } private void startProcess() throws Exception { buildFlowDag(); if (this.taskInstanceHashMap.size() == 0) { initTaskQueue(); submitPostNode(null); } } /** * process end handle */ private void endProcess() { this.stateEvents.clear(); processInstance.setEndTime(new Date()); processService.updateProcessInstance(processInstance); if (processInstance.getState().typeIsWaitingThread()) { processService.createRecoveryWaitingThreadCommand(null, processInstance); } List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId()); ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId()); processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser); } /** * generate process dag * * @throws Exception exception */ private void buildFlowDag() throws Exception { if (this.dag != null) { return; } processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam()); List<TaskNode> taskNodeList = processService.transformTask(processService.findRelationByCode(processDefinition.getProjectCode(), processDefinition.getCode()), Lists.newArrayList()); forbiddenTaskList.clear(); taskNodeList.forEach(taskNode -> { if (taskNode.isForbidden()) { forbiddenTaskList.put(taskNode.getName(), taskNode); } }); // generate process to get DAG info List<String> recoveryNameList = getRecoveryNodeNameList(); List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam()); ProcessDag processDag = generateFlowDag(taskNodeList, startNodeNameList, recoveryNameList, processInstance.getTaskDependType()); if (processDag == null) { logger.error("processDag is null"); return; } // generate process dag dag = DagHelper.buildDagGraph(processDag); } /** * init task queue */ private void initTaskQueue() { taskFailedSubmit = false; activeTaskProcessorMaps.clear(); dependFailedTask.clear(); completeTaskList.clear(); errorTaskList.clear(); List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance task : taskInstanceList) { if (task.isTaskComplete()) { completeTaskList.put(task.getName(), task); } if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) { continue; } if (task.getState().typeIsFailure() && !task.taskCanRetry()) { errorTaskList.put(task.getName(), task); } } if (complementListDate.size() == 0 && needComplementProcess()) { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); if (startDate.after(endDate)) { Date tmp = startDate; startDate = endDate; endDate = tmp; } List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode()); complementListDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedules)); logger.info(" process definition 
code:{} complement data: {}", processInstance.getProcessDefinitionCode(), complementListDate.toString()); } } /** * submit task to execute * * @param taskInstance task instance * @return TaskInstance */ private TaskInstance submitTaskExec(TaskInstance taskInstance) { try { ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType()); if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION && taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) { notifyProcessHostUpdate(taskInstance); } boolean submit = taskProcessor.submit(taskInstance, processInstance, masterConfig.getMasterTaskCommitRetryTimes(), masterConfig.getMasterTaskCommitInterval()); if (submit) { this.taskInstanceHashMap.put(taskInstance.getId(), taskInstance.getTaskCode(), taskInstance); activeTaskProcessorMaps.put(taskInstance.getId(), taskProcessor); taskProcessor.run(); addTimeoutCheck(taskInstance); TaskDefinition taskDefinition = processService.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); taskInstance.setTaskDefine(taskDefinition); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); this.stateEvents.add(stateEvent); } return taskInstance; } else { logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!", processInstance.getId(), processInstance.getName(), taskInstance.getId(), taskInstance.getName()); return null; } } catch (Exception e) { logger.error("submit standby task error", e); return null; } } private void notifyProcessHostUpdate(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getHost())) { return; } try { HostUpdateCommand hostUpdateCommand = new HostUpdateCommand(); hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort())); hostUpdateCommand.setTaskInstanceId(taskInstance.getId()); Host host = new Host(taskInstance.getHost()); nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command()); } catch (Exception e) { logger.error("notify process host update", e); } } private void addTimeoutCheck(TaskInstance taskInstance) { TaskDefinition taskDefinition = processService.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion() ); taskInstance.setTaskDefine(taskDefinition); if (TimeoutFlag.OPEN == taskDefinition.getTimeoutFlag()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); return; } if (taskInstance.isDependTask() || taskInstance.isSubProcess()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); } } /** * find task instance in db. * in case submit more than one same name task in the same time. 
* * @param taskCode task code * @param taskVersion task version * @return TaskInstance */ private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) { List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) { return taskInstance; } } return null; } /** * encapsulation task * * @param processInstance process instance * @param taskNode taskNode * @return TaskInstance */ private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) { TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion()); if (taskInstance == null) { taskInstance = new TaskInstance(); taskInstance.setTaskCode(taskNode.getCode()); taskInstance.setTaskDefinitionVersion(taskNode.getVersion()); // task name taskInstance.setName(taskNode.getName()); // task instance state taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); // process instance id taskInstance.setProcessInstanceId(processInstance.getId()); // task instance type taskInstance.setTaskType(taskNode.getType().toUpperCase()); // task instance whether alert taskInstance.setAlertFlag(Flag.NO); // task instance start time taskInstance.setStartTime(null); // task instance flag taskInstance.setFlag(Flag.YES); // task instance retry times taskInstance.setRetryTimes(0); // max task instance retry times taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes()); // retry task instance interval taskInstance.setRetryInterval(taskNode.getRetryInterval()); //set task param taskInstance.setTaskParams(taskNode.getTaskParams()); // task instance priority if (taskNode.getTaskInstancePriority() == null) { taskInstance.setTaskInstancePriority(Priority.MEDIUM); } else { taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority()); } String processWorkerGroup = processInstance.getWorkerGroup(); processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup; String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup(); Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode(); Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? 
processEnvironmentCode : taskNode.getEnvironmentCode(); if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) { taskInstance.setWorkerGroup(processWorkerGroup); taskInstance.setEnvironmentCode(processEnvironmentCode); } else { taskInstance.setWorkerGroup(taskWorkerGroup); taskInstance.setEnvironmentCode(taskEnvironmentCode); } if (!taskInstance.getEnvironmentCode().equals(-1L)) { Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode()); if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) { taskInstance.setEnvironmentConfig(environment.getConfig()); } } // delay execution time taskInstance.setDelayTime(taskNode.getDelayTime()); } return taskInstance; } public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) { Map<String, Property> allProperty = new HashMap<>(); Map<String, TaskInstance> allTaskInstance = new HashMap<>(); if (CollectionUtils.isNotEmpty(preTask)) { for (String preTaskName : preTask) { TaskInstance preTaskInstance = completeTaskList.get(preTaskName); if (preTaskInstance == null) { continue; } String preVarPool = preTaskInstance.getVarPool(); if (StringUtils.isNotEmpty(preVarPool)) { List<Property> properties = JSONUtils.toList(preVarPool, Property.class); for (Property info : properties) { setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info); } } } if (allProperty.size() > 0) { taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values())); } } } private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) { //for this taskInstance all the param in this part is IN. thisProperty.setDirect(Direct.IN); //get the pre taskInstance Property's name String proName = thisProperty.getProp(); //if the Previous nodes have the Property of same name if (allProperty.containsKey(proName)) { //comparison the value of two Property Property otherPro = allProperty.get(proName); //if this property'value of loop is empty,use the other,whether the other's value is empty or not if (StringUtils.isEmpty(thisProperty.getValue())) { allProperty.put(proName, otherPro); //if property'value of loop is not empty,and the other's value is not empty too, use the earlier value } else if (StringUtils.isNotEmpty(otherPro.getValue())) { TaskInstance otherTask = allTaskInstance.get(proName); if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } else { allProperty.put(proName, otherPro); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } private void submitPostNode(String parentNodeName) { Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeName, skipTaskNodeList, dag, completeTaskList); List<TaskInstance> taskInstances = new ArrayList<>(); for (String taskNode : submitTaskNodeList) { TaskNode taskNodeObject = dag.getNode(taskNode); if (taskInstanceHashMap.containsColumn(taskNodeObject.getCode())) { continue; } TaskInstance task = createTaskInstance(processInstance, taskNodeObject); taskInstances.add(task); } // if previous node success , post node submit for (TaskInstance task : taskInstances) { if (readyToSubmitTaskQueue.contains(task)) { continue; } if 
(completeTaskList.containsKey(task.getName())) { logger.info("task {} has already run success", task.getName()); continue; } if (task.getState().typeIsPause() || task.getState().typeIsCancel()) { logger.info("task {} stopped, the state is {}", task.getName(), task.getState()); } else { addTaskToStandByList(task); } } submitStandByTask(); updateProcessInstanceState(); } /** * determine whether the dependencies of the task node are complete * * @return DependResult */ private DependResult isTaskDepsComplete(String taskName) { Collection<String> startNodes = dag.getBeginNode(); // if vertex,returns true directly if (startNodes.contains(taskName)) { return DependResult.SUCCESS; } TaskNode taskNode = dag.getNode(taskName); List<String> depNameList = taskNode.getDepList(); for (String depsNode : depNameList) { if (!dag.containsNode(depsNode) || forbiddenTaskList.containsKey(depsNode) || skipTaskNodeList.containsKey(depsNode)) { continue; } // dependencies must be fully completed if (!completeTaskList.containsKey(depsNode)) { return DependResult.WAITING; } ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState(); if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) { return DependResult.NON_EXEC; } // ignore task state if current task is condition if (taskNode.isConditionsTask()) { continue; } if (!dependTaskSuccess(depsNode, taskName)) { return DependResult.FAILED; } } logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray())); return DependResult.SUCCESS; } /** * depend node is completed, but here need check the condition task branch is the next node */ private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) { if (dag.getNode(dependNodeName).isConditionsTask()) { //condition task need check the branch to run List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, completeTaskList); if (!nextTaskList.contains(nextNodeName)) { return false; } } else { ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState(); if (depTaskState.typeIsFailure()) { return false; } } return true; } /** * query task instance by complete state * * @param state state * @return task instance list */ private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) { List<TaskInstance> resultList = new ArrayList<>(); for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) { if (entry.getValue().getState() == state) { resultList.add(entry.getValue()); } } return resultList; } /** * where there are ongoing tasks * * @param state state * @return ExecutionStatus */ private ExecutionStatus runningState(ExecutionStatus state) { if (state == ExecutionStatus.READY_STOP || state == ExecutionStatus.READY_PAUSE || state == ExecutionStatus.WAITING_THREAD || state == ExecutionStatus.DELAY_EXECUTION) { // if the running task is not completed, the state remains unchanged return state; } else { return ExecutionStatus.RUNNING_EXECUTION; } } /** * exists failure task,contains submit failure、dependency failure,execute failure(retry after) * * @return Boolean whether has failed task */ private boolean hasFailedTask() { if (this.taskFailedSubmit) { return true; } if (this.errorTaskList.size() > 0) { return true; } return this.dependFailedTask.size() > 0; } /** * process instance failure * * @return Boolean whether process instance failed */ private boolean processFailed() { if (hasFailedTask()) { if (processInstance.getFailureStrategy() == 
FailureStrategy.END) { return true; } if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) { return readyToSubmitTaskQueue.size() == 0 || activeTaskProcessorMaps.size() == 0; } } return false; } /** * whether task for waiting thread * * @return Boolean whether has waiting thread task */ private boolean hasWaitingThreadTask() { List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD); return CollectionUtils.isNotEmpty(waitingList); } /** * prepare for pause * 1,failed retry task in the preparation queue , returns to failure directly * 2,exists pause task,complement not completed, pending submission of tasks, return to suspension * 3,success * * @return ExecutionStatus */ private ExecutionStatus processReadyPause() { if (hasRetryTaskInStandBy()) { return ExecutionStatus.FAILURE; } List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE); if (CollectionUtils.isNotEmpty(pauseList) || !isComplementEnd() || readyToSubmitTaskQueue.size() > 0) { return ExecutionStatus.PAUSE; } else { return ExecutionStatus.SUCCESS; } } /** * generate the latest process instance status by the tasks state * * @param instance * @return process instance execution status */ private ExecutionStatus getProcessInstanceState(ProcessInstance instance) { ExecutionStatus state = instance.getState(); if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) { // active task and retry task exists return runningState(state); } // process failure if (processFailed()) { return ExecutionStatus.FAILURE; } // waiting thread if (hasWaitingThreadTask()) { return ExecutionStatus.WAITING_THREAD; } // pause if (state == ExecutionStatus.READY_PAUSE) { return processReadyPause(); } // stop if (state == ExecutionStatus.READY_STOP) { List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP); List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL); if (CollectionUtils.isNotEmpty(stopList) || CollectionUtils.isNotEmpty(killList) || !isComplementEnd()) { return ExecutionStatus.STOP; } else { return ExecutionStatus.SUCCESS; } } // success if (state == ExecutionStatus.RUNNING_EXECUTION) { List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL); if (readyToSubmitTaskQueue.size() > 0) { //tasks currently pending submission, no retries, indicating that depend is waiting to complete return ExecutionStatus.RUNNING_EXECUTION; } else if (CollectionUtils.isNotEmpty(killTasks)) { // tasks maybe killed manually return ExecutionStatus.FAILURE; } else { // if the waiting queue is empty and the status is in progress, then success return ExecutionStatus.SUCCESS; } } return state; } /** * whether complement end * * @return Boolean whether is complement end */ private boolean isComplementEnd() { if (!processInstance.isComplementData()) { return true; } try { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); return processInstance.getScheduleTime().equals(endTime); } catch (Exception e) { logger.error("complement end failed ", e); return false; } } /** * updateProcessInstance process instance state * after each batch of tasks is executed, the status of the process instance is updated */ private void updateProcessInstanceState() { ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId()); ExecutionStatus state = getProcessInstanceState(instance); if (processInstance.getState() != 
state) { logger.info( "work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}", processInstance.getId(), processInstance.getName(), processInstance.getState(), state, processInstance.getCommandType()); instance.setState(state); processService.updateProcessInstance(instance); processInstance = instance; StateEvent stateEvent = new StateEvent(); stateEvent.setExecutionStatus(processInstance.getState()); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE); this.processStateChangeHandler(stateEvent); } } /** * get task dependency result * * @param taskInstance task instance * @return DependResult */ private DependResult getDependResultForTask(TaskInstance taskInstance) { return isTaskDepsComplete(taskInstance.getName()); } /** * add task to standby list * * @param taskInstance task instance */ private void addTaskToStandByList(TaskInstance taskInstance) { logger.info("add task to stand by list: {}", taskInstance.getName()); try { readyToSubmitTaskQueue.put(taskInstance); } catch (Exception e) { logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e); } } /** * remove task from stand by list * * @param taskInstance task instance */ private void removeTaskFromStandbyList(TaskInstance taskInstance) { logger.info("remove task from stand by list, id: {} name:{}", taskInstance.getId(), taskInstance.getName()); try { readyToSubmitTaskQueue.remove(taskInstance); } catch (Exception e) { logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}", taskInstance.getId(), taskInstance.getName(), e); } } /** * has retry task in standby * * @return Boolean whether has retry task in standby */ private boolean hasRetryTaskInStandBy() { for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) { if (iter.next().getState().typeIsFailure()) { return true; } } return false; } /** * close the on going tasks */ private void killAllTasks() { logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(), activeTaskProcessorMaps.size()); for (int taskId : activeTaskProcessorMaps.keySet()) { TaskInstance taskInstance = processService.findTaskInstanceById(taskId); if (taskInstance == null || taskInstance.getState().typeIsFinished()) { continue; } ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskId); taskProcessor.action(TaskAction.STOP); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); this.addStateEvent(stateEvent); } } } public boolean workFlowFinish() { return this.processInstance.getState().typeIsFinished(); } /** * whether the retry interval is timed out * * @param taskInstance task instance * @return Boolean */ private boolean retryTaskIntervalOverTime(TaskInstance taskInstance) { if (taskInstance.getState() != ExecutionStatus.FAILURE) { return true; } if (taskInstance.getId() == 0 || taskInstance.getMaxRetryTimes() == 0 || taskInstance.getRetryInterval() == 0) { return true; } Date now = new Date(); long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime()); // task retry does not over time, return false return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < 
failedTimeInterval; } /** * handling the list of tasks to be submitted */ private void submitStandByTask() { try { int length = readyToSubmitTaskQueue.size(); for (int i = 0; i < length; i++) { TaskInstance task = readyToSubmitTaskQueue.peek(); if (task == null) { continue; } // stop tasks which is retrying if forced success happens if (task.taskCanRetry()) { TaskInstance retryTask = processService.findTaskInstanceById(task.getId()); if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) { task.setState(retryTask.getState()); logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName()); removeTaskFromStandbyList(task); completeTaskList.put(task.getName(), task); submitPostNode(task.getName()); continue; } } //init varPool only this task is the first time running if (task.isFirstRun()) { //get pre task ,get all the task varPool to this task Set<String> preTask = dag.getPreviousNodes(task.getName()); getPreVarPool(task, preTask); } DependResult dependResult = getDependResultForTask(task); if (DependResult.SUCCESS == dependResult) { if (retryTaskIntervalOverTime(task)) { TaskInstance taskInstance = submitTaskExec(task); if (taskInstance == null) { this.taskFailedSubmit = true; } else { removeTaskFromStandbyList(task); } } } else if (DependResult.FAILED == dependResult) { // if the dependency fails, the current node is not submitted and the state changes to failure. dependFailedTask.put(task.getName(), task); removeTaskFromStandbyList(task); logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult); } else if (DependResult.NON_EXEC == dependResult) { // for some reasons(depend task pause/stop) this task would not be submit removeTaskFromStandbyList(task); logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult); } } } catch (Exception e) { logger.error("submit standby task error", e); } } /** * get recovery task instance * * @param taskId task id * @return recovery task instance */ private TaskInstance getRecoveryTaskInstance(String taskId) { if (!StringUtils.isNotEmpty(taskId)) { return null; } try { Integer intId = Integer.valueOf(taskId); TaskInstance task = processService.findTaskInstanceById(intId); if (task == null) { logger.error("start node id cannot be found: {}", taskId); } else { return task; } } catch (Exception e) { logger.error("get recovery task instance failed ", e); } return null; } /** * get start task instance list * * @param cmdParam command param * @return task instance list */ private List<TaskInstance> getStartTaskInstanceList(String cmdParam) { List<TaskInstance> instanceList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) { String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA); for (String nodeId : idList) { TaskInstance task = getRecoveryTaskInstance(nodeId); if (task != null) { instanceList.add(task); } } } return instanceList; } /** * parse "StartNodeNameList" from cmd param * * @param cmdParam command param * @return start node name list */ private List<String> parseStartNodeName(String cmdParam) { List<String> startNodeNameList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap == null) { return startNodeNameList; } if (paramMap.containsKey(CMD_PARAM_START_NODE_NAMES)) { startNodeNameList = 
Arrays.asList(paramMap.get(CMD_PARAM_START_NODE_NAMES).split(Constants.COMMA)); } return startNodeNameList; } /** * generate start node name list from parsing command param; * if "StartNodeIdList" exists in command param, return StartNodeIdList * * @return recovery node name list */ private List<String> getRecoveryNodeNameList() { List<String> recoveryNodeNameList = new ArrayList<>(); if (CollectionUtils.isNotEmpty(recoverNodeIdList)) { for (TaskInstance task : recoverNodeIdList) { recoveryNodeNameList.add(task.getName()); } } return recoveryNodeNameList; } /** * generate flow dag * * @param totalTaskNodeList total task node list * @param startNodeNameList start node name list * @param recoveryNodeNameList recovery node name list * @param depNodeType depend node type * @return ProcessDag process dag * @throws Exception exception */ public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList, List<String> startNodeNameList, List<String> recoveryNodeNameList, TaskDependType depNodeType) throws Exception { return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeNameList, depNodeType); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,286
[Bug] [API] Delete processDefinitionVersion bug
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When a workflow version is deleted, the process task relations of that version should be deleted as well. Currently only the process definition record of that version is deleted, so its task relation records are left behind. ### What you expected to happen Deleting a workflow version also deletes the process task relations of the corresponding version. ### How to reproduce Call deleteProcessDefinitionVersion and check the task relation records of the deleted version. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
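To make the reported behavior concrete, here is a minimal verification sketch (not part of the issue text; the query method `queryByProcessCodeAndVersion`, the injected mappers, and the test wiring are assumptions for illustration only). Before the fix, the relation rows of the deleted version survive:

```java
// Delete version 2 of a process definition through the service under test.
processDefinitionService.deleteProcessDefinitionVersion(loginUser, projectCode, definitionCode, 2);

// Hypothetical mapper query: fetch the task relation log rows recorded for that version.
// Before the fix this still returns rows, i.e. the relations of version 2 are orphaned.
List<ProcessTaskRelationLog> leftover =
        processTaskRelationLogMapper.queryByProcessCodeAndVersion(definitionCode, 2);
Assert.assertTrue("relations of the deleted version should be removed", leftover.isEmpty());
```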
https://github.com/apache/dolphinscheduler/issues/6286
https://github.com/apache/dolphinscheduler/pull/6287
51e27bb4b64e8270d135dad7fd277124a43761c8
8980b7027a30f75445882280e5872b49985a5188
"2021-09-21T07:16:14Z"
java
"2021-09-21T14:35:50Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import org.apache.dolphinscheduler.api.dto.DagDataSchedule; import org.apache.dolphinscheduler.api.dto.treeview.Instance; import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.ProcessDefinitionService; import org.apache.dolphinscheduler.api.service.ProcessInstanceService; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.SchedulerService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.FileUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import 
org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.permission.PermissionCheck; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.commons.lang.StringUtils; import java.io.BufferedOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.collect.Lists; /** * process definition service impl */ @Service public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService { private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class); private static final String RELEASESTATE = "releaseState"; @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionLogMapper processDefinitionLogMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProcessInstanceService processInstanceService; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private ProcessService processService; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private SchedulerService schedulerService; @Autowired private TenantMapper tenantMapper; /** * create process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return create result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> createProcessDefinition(User loginUser, long projectCode, String name, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = 
projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); createTaskDefinition(result, loginUser, projectCode, taskDefinitionLogs, taskDefinitionJson); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } long processDefinitionCode; try { processDefinitionCode = SnowFlakeUtils.getInstance().nextId(); } catch (SnowFlakeException e) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); return result; } ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description, globalParams, locations, timeout, loginUser.getId(), tenantId); return createProcessDefine(loginUser, result, taskRelationList, processDefinition, taskDefinitionLogs); } private void createTaskDefinition(Map<String, Object> result, User loginUser, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs, String taskDefinitionJson) { if (taskDefinitionLogs.isEmpty()) { logger.error("taskDefinitionJson invalid: {}", taskDefinitionJson); putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson); return; } for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) { logger.error("task definition {} parameter invalid", taskDefinitionLog.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName()); return; } } if (processService.saveTaskDefine(loginUser, projectCode, taskDefinitionLogs)) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); } } private Map<String, Object> createProcessDefine(User loginUser, Map<String, Object> result, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, List<TaskDefinitionLog> taskDefinitionLogs) { int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); if (insertVersion > 0) { int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); } } else { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); } return result; } private Map<String, Object> checkTaskRelationList(List<ProcessTaskRelationLog> taskRelationList, String taskRelationJson, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = 
new HashMap<>(); try { if (taskRelationList == null || taskRelationList.isEmpty()) { logger.error("task relation list is null"); putMsg(result, Status.DATA_IS_NOT_VALID, taskRelationJson); return result; } List<ProcessTaskRelation> processTaskRelations = taskRelationList.stream() .map(processTaskRelationLog -> JSONUtils.parseObject(JSONUtils.toJsonString(processTaskRelationLog), ProcessTaskRelation.class)) .collect(Collectors.toList()); List<TaskNode> taskNodeList = processService.transformTask(processTaskRelations, taskDefinitionLogs); if (taskNodeList.size() != taskRelationList.size()) { Set<Long> postTaskCodes = taskRelationList.stream().map(ProcessTaskRelationLog::getPostTaskCode).collect(Collectors.toSet()); Set<Long> taskNodeCodes = taskNodeList.stream().map(TaskNode::getCode).collect(Collectors.toSet()); Collection<Long> codes = CollectionUtils.subtract(postTaskCodes, taskNodeCodes); if (CollectionUtils.isNotEmpty(codes)) { logger.error("the task code is not exit"); putMsg(result, Status.TASK_DEFINE_NOT_EXIST, StringUtils.join(codes, Constants.COMMA)); return result; } } if (graphHasCycle(taskNodeList)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the task relation json is normal for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { if (processTaskRelationLog.getPostTaskCode() == 0) { logger.error("the post_task_code or post_task_version can't be zero"); putMsg(result, Status.CHECK_PROCESS_TASK_RELATION_ERROR); return result; } } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * query process definition list * * @param loginUser login user * @param projectCode project code * @return definition list */ @Override public Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = resourceList.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * query process definition list paging * * @param loginUser login user * @param projectCode project code * @param searchVal search value * @param userId user id * @param pageNo page number * @param pageSize page size * @return process definition page */ @Override public Result queryProcessDefinitionListPaging(User loginUser, long projectCode, String searchVal, Integer userId, Integer pageNo, Integer pageSize) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } Page<ProcessDefinition> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging( page, searchVal, userId, project.getCode(), 
isAdmin(loginUser)); List<ProcessDefinition> records = processDefinitionIPage.getRecords(); for (ProcessDefinition pd : records) { ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion()); User user = userMapper.selectById(processDefinitionLog.getOperator()); pd.setModifyBy(user.getUserName()); } processDefinitionIPage.setRecords(records); PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) processDefinitionIPage.getTotal()); pageInfo.setTotalList(processDefinitionIPage.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * query detail of process definition * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return process definition detail */ @Override public Map<String, Object> queryProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { Tenant tenant = tenantMapper.queryById(processDefinition.getTenantId()); if (tenant != null) { processDefinition.setTenantCode(tenant.getTenantCode()); } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } @Override public Map<String, Object> queryProcessDefinitionByName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, name); } else { DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } /** * update process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param code process definition code * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return update result code */ @Transactional(rollbackFor = RuntimeException.class) @Override public Map<String, Object> updateProcessDefinition(User loginUser, long projectCode, String name, long code, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<TaskDefinitionLog> taskDefinitionLogs = 
JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); createTaskDefinition(result, loginUser, projectCode, taskDefinitionLogs, taskDefinitionJson); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); // check process definition exists if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { // online can not permit edit putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName()); return result; } if (!name.equals(processDefinition.getName())) { // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } } ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class); processDefinition.set(projectCode, name, description, globalParams, locations, timeout, tenantId); return updateProcessDefine(loginUser, result, taskRelationList, processDefinition, processDefinitionDeepCopy, taskDefinitionLogs); } private Map<String, Object> updateProcessDefine(User loginUser, Map<String, Object> result, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, ProcessDefinition processDefinitionDeepCopy, List<TaskDefinitionLog> taskDefinitionLogs) { int insertVersion; if (processDefinition.equals(processDefinitionDeepCopy)) { insertVersion = processDefinitionDeepCopy.getVersion(); } else { processDefinition.setUpdateTime(new Date()); insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); } if (insertVersion > 0) { int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); } } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); } return result; } /** * verify process definition name unique * * @param loginUser login user * @param projectCode project code * @param name name * @return true if process definition name not exists, otherwise false */ @Override public Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = 
processDefinitionMapper.verifyByDefineName(project.getCode(), name.trim()); if (processDefinition == null) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name.trim()); } return result; } /** * delete process definition by code * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return delete result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } // Determine if the login user is the owner of the process definition if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } // check process definition is already online if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, code); return result; } // check process instances is already running List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES); if (CollectionUtils.isNotEmpty(processInstances)) { putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_CODE_FAIL, processInstances.size()); return result; } // get the timing according to the process definition List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(code); if (!schedules.isEmpty() && schedules.size() > 1) { logger.warn("scheduler num is {},Greater than 1", schedules.size()); putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); return result; } else if (schedules.size() == 1) { Schedule schedule = schedules.get(0); if (schedule.getReleaseState() == ReleaseState.OFFLINE) { scheduleMapper.deleteById(schedule.getId()); } else if (schedule.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId()); return result; } } int delete = processDefinitionMapper.deleteById(processDefinition.getId()); processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode()); if (delete > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); } return result; } /** * release process definition: online / offline * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param releaseState release state * @return release result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check state if (null == releaseState) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, 
RELEASESTATE); return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); switch (releaseState) { case ONLINE: // To check resources whether they are already cancel authorized or deleted String resourceIds = processDefinition.getResourceIds(); if (StringUtils.isNotBlank(resourceIds)) { Integer[] resourceIdArray = Arrays.stream(resourceIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new); PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger); try { permissionCheck.checkPermission(); } catch (Exception e) { logger.error(e.getMessage(), e); putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE); return result; } } processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); break; case OFFLINE: processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray( new long[]{processDefinition.getCode()} ); for (Schedule schedule : scheduleList) { logger.info("set schedule offline, project id: {}, schedule id: {}, process definition code: {}", project.getId(), schedule.getId(), code); // set status schedule.setReleaseState(ReleaseState.OFFLINE); scheduleMapper.updateById(schedule); schedulerService.deleteSchedule(project.getId(), schedule.getId()); } break; default: putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } putMsg(result, Status.SUCCESS); return result; } /** * batch export process definition by codes */ @Override public void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response) { if (StringUtils.isEmpty(codes)) { return; } Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); List<DagDataSchedule> dagDataSchedules = processDefinitionList.stream().map(this::exportProcessDagData).collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(dagDataSchedules)) { downloadProcessDefinitionFile(response, dagDataSchedules); } } /** * download the process definition file */ private void downloadProcessDefinitionFile(HttpServletResponse response, List<DagDataSchedule> dagDataSchedules) { response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE); BufferedOutputStream buff = null; ServletOutputStream out = null; try { out = response.getOutputStream(); buff = new BufferedOutputStream(out); buff.write(JSONUtils.toJsonString(dagDataSchedules).getBytes(StandardCharsets.UTF_8)); buff.flush(); buff.close(); } catch (IOException e) { logger.warn("export process fail", e); } finally { if (null != buff) { try { buff.close(); } catch (Exception e) { logger.warn("export process buffer not close", e); } } if (null != out) { try { out.close(); } catch (Exception e) { logger.warn("export process output stream not close", e); } } } } /** * get export process dag data * * @param processDefinition process definition * @return DagDataSchedule */ public 
DagDataSchedule exportProcessDagData(ProcessDefinition processDefinition) { List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(processDefinition.getCode()); DagDataSchedule dagDataSchedule = new DagDataSchedule(processService.genDagData(processDefinition)); if (!schedules.isEmpty()) { Schedule schedule = schedules.get(0); schedule.setReleaseState(ReleaseState.OFFLINE); dagDataSchedule.setSchedule(schedule); } return dagDataSchedule; } /** * import process definition * * @param loginUser login user * @param projectCode project code * @param file process metadata json file * @return import process */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> importProcessDefinition(User loginUser, long projectCode, MultipartFile file) { Map<String, Object> result = new HashMap<>(); String dagDataScheduleJson = FileUtils.file2String(file); List<DagDataSchedule> dagDataScheduleList = JSONUtils.toList(dagDataScheduleJson, DagDataSchedule.class); //check file content if (CollectionUtils.isEmpty(dagDataScheduleList)) { putMsg(result, Status.DATA_IS_NULL, "fileContent"); return result; } for (DagDataSchedule dagDataSchedule : dagDataScheduleList) { if (!checkAndImport(loginUser, projectCode, result, dagDataSchedule)) { return result; } } return result; } /** * check and import */ private boolean checkAndImport(User loginUser, long projectCode, Map<String, Object> result, DagDataSchedule dagDataSchedule) { if (!checkImportanceParams(dagDataSchedule, result)) { return false; } ProcessDefinition processDefinition = dagDataSchedule.getProcessDefinition(); //unique check Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, projectCode, processDefinition.getName()); if (Status.SUCCESS.equals(checkResult.get(Constants.STATUS))) { putMsg(result, Status.SUCCESS); } else { result.putAll(checkResult); return false; } String processDefinitionName = recursionProcessDefinitionName(projectCode, processDefinition.getName(), 1); processDefinition.setName(processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp()); processDefinition.setUserId(loginUser.getId()); try { processDefinition.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); return false; } List<TaskDefinition> taskDefinitionList = dagDataSchedule.getTaskDefinitionList(); Map<Long, Long> taskCodeMap = new HashMap<>(); Date now = new Date(); List<TaskDefinitionLog> taskDefinitionLogList = new ArrayList<>(); for (TaskDefinition taskDefinition : taskDefinitionList) { TaskDefinitionLog taskDefinitionLog = new TaskDefinitionLog(taskDefinition); taskDefinitionLog.setName(taskDefinitionLog.getName() + "_import_" + DateUtils.getCurrentTimeStamp()); taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUserId(loginUser.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperator(loginUser.getId()); taskDefinitionLog.setOperateTime(now); try { long code = SnowFlakeUtils.getInstance().nextId(); taskCodeMap.put(taskDefinitionLog.getCode(), code); taskDefinitionLog.setCode(code); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code"); return false; } taskDefinitionLogList.add(taskDefinitionLog); } int insert = taskDefinitionMapper.batchInsert(taskDefinitionLogList); int 
logInsert = taskDefinitionLogMapper.batchInsert(taskDefinitionLogList); if ((logInsert & insert) == 0) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); return false; } List<ProcessTaskRelation> taskRelationList = dagDataSchedule.getProcessTaskRelationList(); List<ProcessTaskRelationLog> taskRelationLogList = new ArrayList<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(processTaskRelation); processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode())); processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode())); processTaskRelationLog.setPreTaskVersion(Constants.VERSION_FIRST); processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST); taskRelationLogList.add(processTaskRelationLog); } Map<String, Object> createProcessResult = createProcessDefine(loginUser, result, taskRelationLogList, processDefinition, null); if (Status.SUCCESS.equals(createProcessResult.get(Constants.STATUS))) { putMsg(createProcessResult, Status.SUCCESS); } else { result.putAll(createProcessResult); return false; } Schedule schedule = dagDataSchedule.getSchedule(); if (null != schedule) { ProcessDefinition newProcessDefinition = processDefinitionMapper.queryByCode(processDefinition.getCode()); schedule.setProcessDefinitionCode(newProcessDefinition.getCode()); schedule.setUserId(loginUser.getId()); schedule.setCreateTime(now); schedule.setUpdateTime(now); int scheduleInsert = scheduleMapper.insert(schedule); if (0 == scheduleInsert) { putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR); return false; } } return true; } /** * check importance params */ private boolean checkImportanceParams(DagDataSchedule dagDataSchedule, Map<String, Object> result) { if (dagDataSchedule.getProcessDefinition() == null) { putMsg(result, Status.DATA_IS_NULL, "ProcessDefinition"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getTaskDefinitionList())) { putMsg(result, Status.DATA_IS_NULL, "TaskDefinitionList"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getProcessTaskRelationList())) { putMsg(result, Status.DATA_IS_NULL, "ProcessTaskRelationList"); return false; } return true; } private String recursionProcessDefinitionName(long projectCode, String processDefinitionName, int num) { ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName); if (processDefinition != null) { if (num > 1) { String str = processDefinitionName.substring(0, processDefinitionName.length() - 3); processDefinitionName = str + "(" + num + ")"; } else { processDefinitionName = processDefinition.getName() + "(" + num + ")"; } } else { return processDefinitionName; } return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1); } /** * check the process task relation json * * @param processTaskRelationJson process task relation json * @return check result code */ @Override public Map<String, Object> checkProcessNodeList(String processTaskRelationJson) { Map<String, Object> result = new HashMap<>(); try { if (processTaskRelationJson == null) { logger.error("process data is null"); putMsg(result, Status.DATA_IS_NOT_VALID, processTaskRelationJson); return result; } List<ProcessTaskRelation> taskRelationList = JSONUtils.toList(processTaskRelationJson, ProcessTaskRelation.class); // Check whether the task node is normal List<TaskNode> taskNodes = 
processService.transformTask(taskRelationList, Lists.newArrayList()); if (CollectionUtils.isEmpty(taskNodes)) { logger.error("process node info is empty"); putMsg(result, Status.PROCESS_DAG_IS_EMPTY); return result; } // check has cycle if (graphHasCycle(taskNodes)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the process definition json is normal for (TaskNode taskNode : taskNodes) { if (!CheckUtils.checkTaskNodeParameters(taskNode)) { logger.error("task node {} parameter invalid", taskNode.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName()); return result; } // check extra params CheckUtils.checkOtherParams(taskNode.getExtras()); } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * get task node details based on process definition * * @param loginUser loginUser * @param projectCode project code * @param code process definition code * @return task node list */ @Override public Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData.getTaskDefinitionList()); putMsg(result, Status.SUCCESS); return result; } /** * get task node details map based on process definition * * @param loginUser loginUser * @param projectCode project code * @param codes define codes * @return task node list */ @Override public Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode, String codes) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); if (CollectionUtils.isEmpty(processDefinitionList)) { logger.info("process definition not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes); return result; } Map<Long, List<TaskDefinition>> taskNodeMap = new HashMap<>(); for (ProcessDefinition processDefinition : processDefinitionList) { DagData dagData = processService.genDagData(processDefinition); taskNodeMap.put(processDefinition.getCode(), dagData.getTaskDefinitionList()); } result.put(Constants.DATA_LIST, taskNodeMap); putMsg(result, Status.SUCCESS); return result; } /** * query process definition all by project code * * @param loginUser loginUser * @param projectCode project code * @return process definitions in the project */ @Override public Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode) { Project project = 
projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = processDefinitions.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * Encapsulates the TreeView structure * * @param code process definition code * @param limit limit * @return tree view json data */ @Override public Map<String, Object> viewTree(long code, Integer limit) { Map<String, Object> result = new HashMap<>(); ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (null == processDefinition) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition); // nodes that is running Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>(); //nodes that is waiting to run Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>(); // List of process instances List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(code, limit); processInstanceList.forEach(processInstance -> processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()))); List<TaskDefinitionLog> taskDefinitionList = processService.genTaskDefineList(processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode())); Map<Long, TaskDefinitionLog> taskDefinitionMap = taskDefinitionList.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); if (limit > processInstanceList.size()) { limit = processInstanceList.size(); } TreeViewDto parentTreeViewDto = new TreeViewDto(); parentTreeViewDto.setName("DAG"); parentTreeViewDto.setType(""); parentTreeViewDto.setCode(0L); // Specify the process definition, because it is a TreeView for a process definition for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); Date endTime = processInstance.getEndTime() == null ? 
new Date() : processInstance.getEndTime(); parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), processInstance.getProcessDefinitionCode(), "", processInstance.getState().toString(), processInstance.getStartTime(), endTime, processInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime()))); } List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>(); parentTreeViewDtoList.add(parentTreeViewDto); // Here is the encapsulation task instance for (String startNode : dag.getBeginNode()) { runningNodeMap.put(startNode, parentTreeViewDtoList); } while (Stopper.isRunning()) { Set<String> postNodeList; Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator(); while (iter.hasNext()) { Map.Entry<String, List<TreeViewDto>> en = iter.next(); String nodeName = en.getKey(); parentTreeViewDtoList = en.getValue(); TreeViewDto treeViewDto = new TreeViewDto(); treeViewDto.setName(nodeName); TaskNode taskNode = dag.getNode(nodeName); treeViewDto.setType(taskNode.getType()); treeViewDto.setCode(taskNode.getCode()); //set treeViewDto instances for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), nodeName); if (taskInstance == null) { treeViewDto.getInstances().add(new Instance(-1, "not running", 0, "null")); } else { Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime(); Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime(); int subProcessId = 0; // if process is sub process, the return sub id, or sub id=0 if (taskInstance.isSubProcess()) { TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode()); subProcessId = Integer.parseInt(JSONUtils.parseObject( taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_ID).asText()); } treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskCode(), taskInstance.getTaskType(), taskInstance.getState().toString(), taskInstance.getStartTime(), taskInstance.getEndTime(), taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId)); } } for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) { pTreeViewDto.getChildren().add(treeViewDto); } postNodeList = dag.getSubsequentNodes(nodeName); if (CollectionUtils.isNotEmpty(postNodeList)) { for (String nextNodeName : postNodeList) { List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName); if (CollectionUtils.isEmpty(treeViewDtoList)) { treeViewDtoList = new ArrayList<>(); } treeViewDtoList.add(treeViewDto); waitingRunningNodeMap.put(nextNodeName, treeViewDtoList); } } runningNodeMap.remove(nodeName); } if (waitingRunningNodeMap.size() == 0) { break; } else { runningNodeMap.putAll(waitingRunningNodeMap); waitingRunningNodeMap.clear(); } } result.put(Constants.DATA_LIST, parentTreeViewDto); result.put(Constants.STATUS, Status.SUCCESS); result.put(Constants.MSG, Status.SUCCESS.getMsg()); return result; } /** * whether the graph has a ring * * @param taskNodeResponseList task node response list * @return if graph has cycle flag */ private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) { DAG<String, TaskNode, String> graph = new DAG<>(); // Fill the vertices for (TaskNode taskNodeResponse : taskNodeResponseList) { 
graph.addNode(taskNodeResponse.getName(), taskNodeResponse); } // Fill edge relations for (TaskNode taskNodeResponse : taskNodeResponseList) { List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class); if (CollectionUtils.isNotEmpty(preTasks)) { for (String preTask : preTasks) { if (!graph.addEdge(preTask, taskNodeResponse.getName())) { return true; } } } } return graph.hasCycle(); } /** * batch copy process definition * * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override public Map<String, Object> batchCopyProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, true); checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, true); return result; } /** * batch move process definition * * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override public Map<String, Object> batchMoveProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (projectCode == targetProjectCode) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, false); checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, false); return result; } private Map<String, Object> checkParams(User loginUser, long projectCode, String processDefinitionCodes, long targetProjectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (StringUtils.isEmpty(processDefinitionCodes)) { putMsg(result, Status.PROCESS_DEFINITION_CODES_IS_EMPTY, processDefinitionCodes); return result; } if (projectCode != targetProjectCode) { Project targetProject = projectMapper.queryByCode(targetProjectCode); //check user access for project Map<String, Object> targetResult = projectService.checkProjectAndAuth(loginUser, targetProject, targetProjectCode); if (targetResult.get(Constants.STATUS) != Status.SUCCESS) { return targetResult; } } return result; } private void doBatchOperateProcessDefinition(User loginUser, long targetProjectCode, List<String> failedProcessList, String processDefinitionCodes, Map<String, Object> result, boolean isCopy) { Set<Long> definitionCodes = Arrays.stream(processDefinitionCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(definitionCodes); Set<Long> queryCodes = processDefinitionList.stream().map(ProcessDefinition::getCode).collect(Collectors.toSet()); // definitionCodes - queryCodes Set<Long> diffCode = definitionCodes.stream().filter(code -> 
!queryCodes.contains(code)).collect(Collectors.toSet()); diffCode.forEach(code -> failedProcessList.add(code + "[null]")); for (ProcessDefinition processDefinition : processDefinitionList) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<ProcessTaskRelationLog> taskRelationList = processTaskRelations.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList()); processDefinition.setProjectCode(targetProjectCode); if (isCopy) { processDefinition.setName(processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp()); createProcessDefine(loginUser, result, taskRelationList, processDefinition, Lists.newArrayList()); } else { updateProcessDefine(loginUser, result, taskRelationList, processDefinition, null, Lists.newArrayList()); } if (result.get(Constants.STATUS) != Status.SUCCESS) { failedProcessList.add(processDefinition.getCode() + "[" + processDefinition.getName() + "]"); } } } /** * switch the defined process definition version * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param version the version user want to switch * @return switch process definition version result code */ @Override public Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (Objects.isNull(processDefinition)) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR, code); return result; } ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(code, version); if (Objects.isNull(processDefinitionLog)) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR, processDefinition.getCode(), version); return result; } int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog); if (switchVersion > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR); } return result; } /** * check batch operate result * * @param srcProjectCode srcProjectCode * @param targetProjectCode targetProjectCode * @param result result * @param failedProcessList failedProcessList * @param isCopy isCopy */ private void checkBatchOperateResult(long srcProjectCode, long targetProjectCode, Map<String, Object> result, List<String> failedProcessList, boolean isCopy) { if (!failedProcessList.isEmpty()) { if (isCopy) { putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } else { putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } } else { putMsg(result, Status.SUCCESS); } } /** * query the pagination versions info by one certain process definition code * * @param loginUser login user info to check auth * @param projectCode project code * @param pageNo page number * @param pageSize page size * @param code process definition code * @return the pagination process definition versions info of the certain process 
definition */ @Override public Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); // check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize); Page<ProcessDefinitionLog> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, code); List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords(); pageInfo.setTotalList(processDefinitionLogs); pageInfo.setTotal((int) processDefinitionVersionsPaging.getTotal()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * delete one certain process definition by version number and process definition code * * @param loginUser login user info to check auth * @param projectCode project code * @param code process definition code * @param version version number * @return delete result code */ @Override public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version); putMsg(result, Status.SUCCESS); } return result; } }
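Note how deleteProcessDefinitionVersion above removes only the process definition log record; as issue 6286 below reports, the task relation logs of the same version are left behind. A minimal sketch of the corrected flow, assuming a hypothetical deleteByProcessCodeAndVersion method on ProcessTaskRelationLogMapper (the merged fix may differ in naming and structure):

```java
// Sketch only: delete both the definition log and the task relation logs of
// the given version. processTaskRelationLogMapper and its
// deleteByProcessCodeAndVersion method are assumptions for illustration.
@Override
public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) {
    Project project = projectMapper.queryByCode(projectCode);
    // check user access for project
    Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
    if (result.get(Constants.STATUS) != Status.SUCCESS) {
        return result;
    }
    ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
    if (processDefinition == null) {
        putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
    } else {
        // delete the definition record of this version, as before
        processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version);
        // additionally delete the task relation records of the same version
        processTaskRelationLogMapper.deleteByProcessCodeAndVersion(code, version);
        putMsg(result, Status.SUCCESS);
    }
    return result;
}
```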
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,286
[Bug] [API] Delete processDefinitionVersion bug
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When a workflow version is deleted, the task relations of that version should be deleted as well; currently only the workflow definition record of that version is deleted. ### What you expected to happen Deleting a workflow version also deletes the task relations of that version. ### How to reproduce Call deleteProcessDefinitionVersion ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6286
https://github.com/apache/dolphinscheduler/pull/6287
51e27bb4b64e8270d135dad7fd277124a43761c8
8980b7027a30f75445882280e5872b49985a5188
"2021-09-21T07:16:14Z"
java
"2021-09-21T14:35:50Z"
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessTaskRelationLogMapper.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao.mapper; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.ibatis.annotations.Param; import java.util.List; import com.baomidou.mybatisplus.core.mapper.BaseMapper; /** * process task relation log mapper interface */ public interface ProcessTaskRelationLogMapper extends BaseMapper<ProcessTaskRelationLog> { /** * query process task relation log * * @param processCode process definition code * @param processVersion process version * @return process task relation log */ List<ProcessTaskRelationLog> queryByProcessCodeAndVersion(@Param("processCode") long processCode, @Param("processVersion") int processVersion); /** * batch insert process task relation * * @param taskRelationList taskRelationList * @return int */ int batchInsert(@Param("taskRelationList") List<ProcessTaskRelationLog> taskRelationList); }
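The interface above only queries and inserts relation logs. Fixing issue 6286 also calls for a delete keyed on process code and version; a hedged sketch of such an addition (the method name and signature are an assumption, not necessarily what the merged PR uses):

```java
/**
 * delete process task relation logs by process definition code and version
 * (hypothetical addition for illustration)
 *
 * @param processCode process definition code
 * @param processVersion process definition version
 * @return number of deleted rows
 */
int deleteByProcessCodeAndVersion(@Param("processCode") long processCode,
                                  @Param("processVersion") int processVersion);
```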
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,286
[Bug] [API] Delete processDefinitionVersion bug
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When a workflow version is deleted, the task relations of that version should be deleted as well; currently only the workflow definition record of that version is deleted. ### What you expected to happen Deleting a workflow version also deletes the task relations of that version. ### How to reproduce Call deleteProcessDefinitionVersion ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6286
https://github.com/apache/dolphinscheduler/pull/6287
51e27bb4b64e8270d135dad7fd277124a43761c8
8980b7027a30f75445882280e5872b49985a5188
"2021-09-21T07:16:14Z"
java
"2021-09-21T14:35:50Z"
dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessTaskRelationLogMapper.xml
<?xml version="1.0" encoding="UTF-8" ?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd" > <mapper namespace="org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper"> <sql id="baseSql"> id, name, process_definition_version, project_code, process_definition_code, pre_task_code, pre_task_version, post_task_code, post_task_version, condition_type, condition_params, operator, operate_time, create_time, update_time </sql> <select id="queryByProcessCodeAndVersion" resultType="org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog"> select <include refid="baseSql"/> from t_ds_process_task_relation_log WHERE process_definition_code = #{processCode} and process_definition_version = #{processVersion} </select> <insert id="batchInsert"> insert into t_ds_process_task_relation_log (name, process_definition_version, project_code, process_definition_code, pre_task_code, pre_task_version, post_task_code, post_task_version, condition_type, condition_params, operator, operate_time, create_time, update_time) values <foreach collection="taskRelationList" item="relation" separator=","> (#{relation.name},#{relation.processDefinitionVersion},#{relation.projectCode},#{relation.processDefinitionCode}, #{relation.preTaskCode},#{relation.preTaskVersion},#{relation.postTaskCode},#{relation.postTaskVersion}, #{relation.conditionType},#{relation.conditionParams},#{relation.operator},#{relation.operateTime}, #{relation.createTime},#{relation.updateTime}) </foreach> </insert> </mapper>
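The XML above maps only the query and the batch insert, so a matching delete statement would be needed for the fix. Since the document's code examples are kept in Java, the statement is sketched below in MyBatis's annotation-based form purely for illustration; the table and column names come from the mapping above, the method itself is hypothetical, and in the project the SQL would live in this XML file rather than an annotation:

```java
// Hypothetical annotation-based form of the <delete> statement this mapper
// would need (org.apache.ibatis.annotations.Delete).
@Delete("delete from t_ds_process_task_relation_log"
        + " where process_definition_code = #{processCode}"
        + " and process_definition_version = #{processVersion}")
int deleteByProcessCodeAndVersion(@Param("processCode") long processCode,
                                  @Param("processVersion") int processVersion);
```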
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,228
[Bug] [ui] When I create a new queue in the yarn queue, clicking New queue again will echo the last newly created queue value
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When I create a new queue on the Yarn queue page and then click Create queue again, the dialog echoes the values of the queue I just created ![image](https://user-images.githubusercontent.com/39329477/133537702-eef9347b-3398-4dce-ab3b-0803368b0e67.png) ### What you expected to happen Clicking Create queue again should open an empty dialog instead of echoing the last created queue's values ### How to reproduce Create a Yarn queue twice in a row ### Anything else -- ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6228
https://github.com/apache/dolphinscheduler/pull/6264
7a321505cfa622761941d0aa0f0e2b01893e7867
22277517c5a7f27bfa57aff8eadc81dd963c588c
"2021-09-16T02:09:48Z"
java
"2021-09-22T03:06:15Z"
dolphinscheduler-ui/src/js/conf/home/pages/security/pages/queue/index.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <m-list-construction :title="$t('Queue manage')"> <template slot="conditions"> <m-conditions @on-conditions="_onConditions"> <template slot="button-group" v-if="isADMIN"> <el-button size="mini" @click="_create('')">{{$t('Create queue')}}</el-button> <el-dialog :title="item ? $t('Edit queue') : $t('Create queue')" :v-if="createQueueDialog" :visible.sync="createQueueDialog" width="auto"> <m-create-queue :item="item" @onUpdate="onUpdate" @close="close"></m-create-queue> </el-dialog> </template> </m-conditions> </template> <template slot="content"> <template v-if="queueList.length || total>0"> <m-list @on-edit="_onEdit" :queue-list="queueList" :page-no="searchParams.pageNo" :page-size="searchParams.pageSize"> </m-list> <div class="page-box"> <el-pagination background @current-change="_page" @size-change="_pageSize" :page-size="searchParams.pageSize" :current-page.sync="searchParams.pageNo" :page-sizes="[10, 30, 50]" layout="sizes, prev, pager, next, jumper" :total="total"> </el-pagination> </div> </template> <template v-if="!queueList.length && total<=0"> <m-no-data></m-no-data> </template> <m-spin :is-spin="isLoading" :is-left="isLeft"></m-spin> </template> </m-list-construction> </template> <script> import _ from 'lodash' import { mapActions } from 'vuex' import mList from './_source/list' import store from '@/conf/home/store' import mSpin from '@/module/components/spin/spin' import mCreateQueue from './_source/createQueue' import mNoData from '@/module/components/noData/noData' import listUrlParamHandle from '@/module/mixin/listUrlParamHandle' import mConditions from '@/module/components/conditions/conditions' import mListConstruction from '@/module/components/listConstruction/listConstruction' export default { name: 'queue-index', data () { return { total: null, isLoading: true, queueList: [], searchParams: { pageSize: 10, pageNo: 1, searchVal: '' }, isLeft: true, isADMIN: store.state.user.userInfo.userType === 'ADMIN_USER', item: {}, createQueueDialog: false } }, mixins: [listUrlParamHandle], props: {}, methods: { ...mapActions('security', ['getQueueListP']), /** * Query */ _onConditions (o) { this.searchParams = _.assign(this.searchParams, o) this.searchParams.pageNo = 1 }, _page (val) { this.searchParams.pageNo = val }, _pageSize (val) { this.searchParams.pageSize = val }, _onEdit (item) { this._create(item) }, _create (item) { this.item = item this.createQueueDialog = true }, onUpdate () { this._debounceGET('false') this.createQueueDialog = false }, close () { this.createQueueDialog = false }, _getList (flag) { if (sessionStorage.getItem('isLeft') === 0) { this.isLeft = false } else { this.isLeft = true } this.isLoading = !flag this.getQueueListP(this.searchParams).then(res => { if (this.searchParams.pageNo > 
1 && res.totalList.length === 0) { this.searchParams.pageNo = this.searchParams.pageNo - 1 } else { this.queueList = [] this.queueList = res.totalList this.total = res.total this.isLoading = false } }).catch(e => { this.isLoading = false }) } }, watch: { // router '$route' (a) { // when the url carries no params, reset to the first page of the list this.searchParams.pageNo = _.isEmpty(a.query) ? 1 : a.query.pageNo } }, created () { }, mounted () { }, beforeDestroy () { sessionStorage.setItem('isLeft', 1) }, components: { mList, mListConstruction, mConditions, mSpin, mNoData, mCreateQueue } } </script>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,284
[Bug] [API] Api transaction does not take effect
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened API transactions do not take effect, for example in the processDefinition create/update APIs ### What you expected to happen Transactional operations should work: if any step fails, all database writes in the operation are rolled back ### How to reproduce Make the second table-write step fail; the first table write is not rolled back ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6284
https://github.com/apache/dolphinscheduler/pull/6288
7e8febed4de0c27f67bf67811d2314762dce2733
d7160874e4f086fafff8675123cb8de7f46cffc1
"2021-09-21T04:24:18Z"
java
"2021-09-22T12:38:22Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ProcessDefinitionController.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.controller; import static org.apache.dolphinscheduler.api.enums.Status.BATCH_COPY_PROCESS_DEFINITION_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.BATCH_MOVE_PROCESS_DEFINITION_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.CREATE_PROCESS_DEFINITION_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.DELETE_PROCESS_DEFINITION_VERSION_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.ENCAPSULATION_TREEVIEW_STRUCTURE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.IMPORT_PROCESS_DEFINE_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_DATAIL_OF_PROCESS_DEFINITION_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_PROCESS_DEFINITION_LIST; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_PROCESS_DEFINITION_LIST_PAGING_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.QUERY_PROCESS_DEFINITION_VERSIONS_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.RELEASE_PROCESS_DEFINITION_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.UPDATE_PROCESS_DEFINITION_ERROR; import static org.apache.dolphinscheduler.api.enums.Status.VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR; import org.apache.dolphinscheduler.api.aspect.AccessLogAnnotation; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ApiException; import org.apache.dolphinscheduler.api.service.ProcessDefinitionService; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import javax.servlet.http.HttpServletResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; import org.springframework.web.bind.annotation.DeleteMapping; import 
org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.PostMapping; import org.springframework.web.bind.annotation.PutMapping; import org.springframework.web.bind.annotation.RequestAttribute; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.ResponseBody; import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; import org.springframework.web.multipart.MultipartFile; import io.swagger.annotations.Api; import io.swagger.annotations.ApiImplicitParam; import io.swagger.annotations.ApiImplicitParams; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.ApiParam; import springfox.documentation.annotations.ApiIgnore; /** * process definition controller */ @Api(tags = "PROCESS_DEFINITION_TAG") @RestController @RequestMapping("projects/{projectCode}/process-definition") public class ProcessDefinitionController extends BaseController { private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionController.class); @Autowired private ProcessDefinitionService processDefinitionService; /** * create process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param description description * @param globalParams globalParams * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return create result code */ @ApiOperation(value = "createProcessDefinition", notes = "CREATE_PROCESS_DEFINITION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String"), @ApiImplicitParam(name = "locations", value = "PROCESS_DEFINITION_LOCATIONS", required = true, type = "String"), @ApiImplicitParam(name = "description", value = "PROCESS_DEFINITION_DESC", required = false, type = "String") }) @PostMapping() @ResponseStatus(HttpStatus.CREATED) @ApiException(CREATE_PROCESS_DEFINITION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result createProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam(value = "name", required = true) String name, @RequestParam(value = "description", required = false) String description, @RequestParam(value = "globalParams", required = false, defaultValue = "[]") String globalParams, @RequestParam(value = "locations", required = false) String locations, @RequestParam(value = "timeout", required = false, defaultValue = "0") int timeout, @RequestParam(value = "tenantCode", required = true) String tenantCode, @RequestParam(value = "taskRelationJson", required = true) String taskRelationJson, @RequestParam(value = "taskDefinitionJson", required = true) String taskDefinitionJson) { Map<String, Object> result = processDefinitionService.createProcessDefinition(loginUser, projectCode, name, description, globalParams, locations, timeout, tenantCode, taskRelationJson, taskDefinitionJson); return returnDataList(result); } /** * copy process definition * * @param loginUser login user * @param projectCode project code * @param codes process 
definition codes * @param targetProjectCode target project code * @return copy result code */ @ApiOperation(value = "batchCopyByCodes", notes = "COPY_PROCESS_DEFINITION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODES", required = true, dataType = "String", example = "3,4"), @ApiImplicitParam(name = "targetProjectCode", value = "TARGET_PROJECT_CODE", required = true, dataType = "Long", example = "123") }) @PostMapping(value = "/batch-copy") @ResponseStatus(HttpStatus.OK) @ApiException(BATCH_COPY_PROCESS_DEFINITION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result copyProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam(value = "codes", required = true) String codes, @RequestParam(value = "targetProjectCode", required = true) long targetProjectCode) { return returnDataList(processDefinitionService.batchCopyProcessDefinition(loginUser, projectCode, codes, targetProjectCode)); } /** * move process definition * * @param loginUser login user * @param projectCode project code * @param codes process definition codes * @param targetProjectCode target project code * @return move result code */ @ApiOperation(value = "batchMoveByCodes", notes = "MOVE_PROCESS_DEFINITION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODES", required = true, dataType = "String", example = "3,4"), @ApiImplicitParam(name = "targetProjectCode", value = "TARGET_PROJECT_CODE", required = true, dataType = "Long", example = "123") }) @PostMapping(value = "/batch-move") @ResponseStatus(HttpStatus.OK) @ApiException(BATCH_MOVE_PROCESS_DEFINITION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result moveProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam(value = "codes", required = true) String codes, @RequestParam(value = "targetProjectCode", required = true) long targetProjectCode) { return returnDataList(processDefinitionService.batchMoveProcessDefinition(loginUser, projectCode, codes, targetProjectCode)); } /** * verify process definition name unique * * @param loginUser login user * @param projectCode project code * @param name name * @return true if process definition name not exists, otherwise false */ @ApiOperation(value = "verify-name", notes = "VERIFY_PROCESS_DEFINITION_NAME_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String") }) @GetMapping(value = "/verify-name") @ResponseStatus(HttpStatus.OK) @ApiException(VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result verifyProcessDefinitionName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam(value = "name", required = true) String name) { Map<String, Object> result = processDefinitionService.verifyProcessDefinitionName(loginUser, projectCode, name); return returnDataList(result); } /** * update process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param code process 
definition code * @param description description * @param globalParams globalParams * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return update result code */ @ApiOperation(value = "update", notes = "UPDATE_PROCESS_DEFINITION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String"), @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "123456789"), @ApiImplicitParam(name = "locations", value = "PROCESS_DEFINITION_LOCATIONS", required = true, type = "String"), @ApiImplicitParam(name = "description", value = "PROCESS_DEFINITION_DESC", required = false, type = "String"), @ApiImplicitParam(name = "releaseState", value = "RELEASE_PROCESS_DEFINITION_NOTES", required = false, dataType = "ReleaseState") }) @PutMapping(value = "/{code}") @ResponseStatus(HttpStatus.OK) @ApiException(UPDATE_PROCESS_DEFINITION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result updateProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam(value = "name", required = true) String name, @PathVariable(value = "code", required = true) long code, @RequestParam(value = "description", required = false) String description, @RequestParam(value = "globalParams", required = false, defaultValue = "[]") String globalParams, @RequestParam(value = "locations", required = false) String locations, @RequestParam(value = "timeout", required = false, defaultValue = "0") int timeout, @RequestParam(value = "tenantCode", required = true) String tenantCode, @RequestParam(value = "taskRelationJson", required = true) String taskRelationJson, @RequestParam(value = "taskDefinitionJson", required = true) String taskDefinitionJson, @RequestParam(value = "releaseState", required = false, defaultValue = "OFFLINE") ReleaseState releaseState) { Map<String, Object> result = processDefinitionService.updateProcessDefinition(loginUser, projectCode, name, code, description, globalParams, locations, timeout, tenantCode, taskRelationJson, taskDefinitionJson); // If the update fails, the result will be returned directly if (result.get(Constants.STATUS) != Status.SUCCESS) { return returnDataList(result); } // Judge whether to go online after editing,0 means offline, 1 means online if (releaseState == ReleaseState.ONLINE) { result = processDefinitionService.releaseProcessDefinition(loginUser, projectCode, code, releaseState); } return returnDataList(result); } /** * query process definition version paging list info * * @param loginUser login user info * @param projectCode project code * @param pageNo the process definition version list current page number * @param pageSize the process definition version list page size * @param code the process definition code * @return the process definition version list */ @ApiOperation(value = "queryVersions", notes = "QUERY_PROCESS_DEFINITION_VERSIONS_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", required = true, dataType = "Int", example = "1"), @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", required = true, dataType = "Int", example = "10"), @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = 
true, dataType = "Long", example = "1") }) @GetMapping(value = "/{code}/versions") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_PROCESS_DEFINITION_VERSIONS_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryProcessDefinitionVersions(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam(value = "pageNo") int pageNo, @RequestParam(value = "pageSize") int pageSize, @PathVariable(value = "code") long code) { Result result = checkPageParams(pageNo, pageSize); if (!result.checkResult()) { return result; } result = processDefinitionService.queryProcessDefinitionVersions(loginUser, projectCode, pageNo, pageSize, code); return result; } /** * switch certain process definition version * * @param loginUser login user info * @param projectCode project code * @param code the process definition code * @param version the version user want to switch * @return switch version result code */ @ApiOperation(value = "switchVersion", notes = "SWITCH_PROCESS_DEFINITION_VERSION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "1"), @ApiImplicitParam(name = "version", value = "VERSION", required = true, dataType = "Int", example = "100") }) @GetMapping(value = "/{code}/versions/{version}") @ResponseStatus(HttpStatus.OK) @ApiException(SWITCH_PROCESS_DEFINITION_VERSION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result switchProcessDefinitionVersion(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @PathVariable(value = "code") long code, @PathVariable(value = "version") int version) { Map<String, Object> result = processDefinitionService.switchProcessDefinitionVersion(loginUser, projectCode, code, version); return returnDataList(result); } /** * delete the certain process definition version by version and process definition code * * @param loginUser login user info * @param projectCode project code * @param code the process definition code * @param version the process definition version user want to delete * @return delete version result code */ @ApiOperation(value = "deleteVersion", notes = "DELETE_PROCESS_DEFINITION_VERSION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "1"), @ApiImplicitParam(name = "version", value = "VERSION", required = true, dataType = "Int", example = "100") }) @DeleteMapping(value = "/{code}/versions/{version}") @ResponseStatus(HttpStatus.OK) @ApiException(DELETE_PROCESS_DEFINITION_VERSION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result deleteProcessDefinitionVersion(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @PathVariable(value = "code") long code, @PathVariable(value = "version") int version) { Map<String, Object> result = processDefinitionService.deleteProcessDefinitionVersion(loginUser, projectCode, code, version); return returnDataList(result); } /** * release process definition * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param releaseState release 
state * @return release result code */ @ApiOperation(value = "release", notes = "RELEASE_PROCESS_DEFINITION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, type = "String"), @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "123456789"), @ApiImplicitParam(name = "releaseState", value = "PROCESS_DEFINITION_RELEASE", required = true, dataType = "ReleaseState"), }) @PostMapping(value = "/{code}/release") @ResponseStatus(HttpStatus.OK) @ApiException(RELEASE_PROCESS_DEFINITION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result releaseProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @PathVariable(value = "code", required = true) long code, @RequestParam(value = "releaseState", required = true) ReleaseState releaseState) { Map<String, Object> result = processDefinitionService.releaseProcessDefinition(loginUser, projectCode, code, releaseState); return returnDataList(result); } /** * query detail of process definition by code * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return process definition detail */ @ApiOperation(value = "queryProcessDefinitionByCode", notes = "QUERY_PROCESS_DEFINITION_BY_CODE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "123456789") }) @GetMapping(value = "/{code}") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_DATAIL_OF_PROCESS_DEFINITION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryProcessDefinitionByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @PathVariable(value = "code", required = true) long code) { Map<String, Object> result = processDefinitionService.queryProcessDefinitionByCode(loginUser, projectCode, code); return returnDataList(result); } /** * query detail of process definition by name * * @param loginUser login user * @param projectCode project code * @param name process definition name * @return process definition detail */ @ApiOperation(value = "queryProcessDefinitionByName", notes = "QUERY_PROCESS_DEFINITION_BY_NAME_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "name", value = "PROCESS_DEFINITION_NAME", required = true, dataType = "String") }) @GetMapping(value = "/query-by-name") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_DATAIL_OF_PROCESS_DEFINITION_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result<ProcessDefinition> queryProcessDefinitionByName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam("name") String name) { Map<String, Object> result = processDefinitionService.queryProcessDefinitionByName(loginUser, projectCode, name); return returnDataList(result); } /** * query Process definition list * * @param loginUser login user * @param projectCode project code * @return process definition list */ @ApiOperation(value = "queryList", notes = "QUERY_PROCESS_DEFINITION_LIST_NOTES") @GetMapping(value = "/list") @ResponseStatus(HttpStatus.OK) 
@ApiException(QUERY_PROCESS_DEFINITION_LIST) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryProcessDefinitionList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode) { Map<String, Object> result = processDefinitionService.queryProcessDefinitionList(loginUser, projectCode); return returnDataList(result); } /** * query process definition list paging * * @param loginUser login user * @param projectCode project code * @param searchVal search value * @param pageNo page number * @param pageSize page size * @param userId user id * @return process definition page */ @ApiOperation(value = "queryListPaging", notes = "QUERY_PROCESS_DEFINITION_LIST_PAGING_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", required = false, type = "String"), @ApiImplicitParam(name = "userId", value = "USER_ID", required = false, dataType = "Int", example = "100"), @ApiImplicitParam(name = "pageNo", value = "PAGE_NO", required = true, dataType = "Int", example = "1"), @ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", required = true, dataType = "Int", example = "10") }) @GetMapping() @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_PROCESS_DEFINITION_LIST_PAGING_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryProcessDefinitionListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam(value = "searchVal", required = false) String searchVal, @RequestParam(value = "userId", required = false, defaultValue = "0") Integer userId, @RequestParam("pageNo") Integer pageNo, @RequestParam("pageSize") Integer pageSize) { Result result = checkPageParams(pageNo, pageSize); if (!result.checkResult()) { return result; } searchVal = ParameterUtils.handleEscapes(searchVal); return processDefinitionService.queryProcessDefinitionListPaging(loginUser, projectCode, searchVal, userId, pageNo, pageSize); } /** * encapsulation tree view structure * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param limit limit * @return tree view json data */ @ApiOperation(value = "viewTree", notes = "VIEW_TREE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "100"), @ApiImplicitParam(name = "limit", value = "LIMIT", required = true, dataType = "Int", example = "100") }) @GetMapping(value = "/{code}/view-tree") @ResponseStatus(HttpStatus.OK) @ApiException(ENCAPSULATION_TREEVIEW_STRUCTURE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result viewTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @PathVariable("code") long code, @RequestParam("limit") Integer limit) { Map<String, Object> result = processDefinitionService.viewTree(code, limit); return returnDataList(result); } /** * get tasks list by process definition code * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return task list */ @ApiOperation(value = "getTasksByDefinitionCode", notes = "GET_TASK_LIST_BY_DEFINITION_CODE_NOTES") @ApiImplicitParams({ 
@ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "Long", example = "100") }) @GetMapping(value = "/{code}/tasks") @ResponseStatus(HttpStatus.OK) @ApiException(GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR) public Result getNodeListByDefinitionCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @PathVariable("code") long code) { Map<String, Object> result = processDefinitionService.getTaskNodeListByDefinitionCode(loginUser, projectCode, code); return returnDataList(result); } /** * get tasks list map by process definition multiple code * * @param loginUser login user * @param projectCode project code * @param codes process definition codes * @return node list data */ @ApiOperation(value = "getTaskListByDefinitionCodes", notes = "GET_TASK_LIST_BY_DEFINITION_CODE_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODES", required = true, type = "String", example = "100,200,300") }) @GetMapping(value = "/batch-query-tasks") @ResponseStatus(HttpStatus.OK) @ApiException(GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR) public Result getNodeListMapByDefinitionCodes(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam("codes") String codes) { Map<String, Object> result = processDefinitionService.getNodeListMapByDefinitionCodes(loginUser, projectCode, codes); return returnDataList(result); } /** * delete process definition by code * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return delete result code */ @ApiOperation(value = "deleteByCode", notes = "DELETE_PROCESS_DEFINITION_BY_ID_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "code", value = "PROCESS_DEFINITION_CODE", dataType = "Int", example = "100") }) @DeleteMapping(value = "/{code}") @ResponseStatus(HttpStatus.OK) @ApiException(DELETE_PROCESS_DEFINE_BY_CODE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result deleteProcessDefinitionByCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @PathVariable("code") long code) { Map<String, Object> result = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, code); return returnDataList(result); } /** * batch delete process definition by codes * * @param loginUser login user * @param projectCode project code * @param codes process definition code list * @return delete result code */ @ApiOperation(value = "batchDeleteByCodes", notes = "BATCH_DELETE_PROCESS_DEFINITION_BY_IDS_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "String") }) @PostMapping(value = "/batch-delete") @ResponseStatus(HttpStatus.OK) @ApiException(BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result batchDeleteProcessDefinitionByCodes(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam("codes") String codes) { Map<String, Object> result = new HashMap<>(); 
List<String> deleteFailedCodeList = new ArrayList<>(); if (!StringUtils.isEmpty(codes)) { String[] processDefinitionCodeArray = codes.split(","); for (String strProcessDefinitionCode : processDefinitionCodeArray) { long code = Long.parseLong(strProcessDefinitionCode); try { Map<String, Object> deleteResult = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, code); if (!Status.SUCCESS.equals(deleteResult.get(Constants.STATUS))) { deleteFailedCodeList.add(strProcessDefinitionCode); logger.error((String) deleteResult.get(Constants.MSG)); } } catch (Exception e) { deleteFailedCodeList.add(strProcessDefinitionCode); } } } if (!deleteFailedCodeList.isEmpty()) { putMsg(result, BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR, String.join(",", deleteFailedCodeList)); } else { putMsg(result, Status.SUCCESS); } return returnDataList(result); } /** * batch export process definition by codes * * @param loginUser login user * @param projectCode project code * @param codes process definition codes * @param response response */ @ApiOperation(value = "batchExportByCodes", notes = "BATCH_EXPORT_PROCESS_DEFINITION_BY_CODES_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "codes", value = "PROCESS_DEFINITION_CODE", required = true, dataType = "String") }) @PostMapping(value = "/batch-export") @ResponseBody @AccessLogAnnotation(ignoreRequestArgs = {"loginUser", "response"}) public void batchExportProcessDefinitionByCodes(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam("codes") String codes, HttpServletResponse response) { try { processDefinitionService.batchExportProcessDefinitionByCodes(loginUser, projectCode, codes, response); } catch (Exception e) { logger.error(Status.BATCH_EXPORT_PROCESS_DEFINE_BY_IDS_ERROR.getMsg(), e); } } /** * query all process definition by project code * * @param loginUser login user * @param projectCode project code * @return process definition list */ @ApiOperation(value = "queryAllByProjectCode", notes = "QUERY_PROCESS_DEFINITION_All_BY_PROJECT_CODE_NOTES") @GetMapping(value = "/all") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_PROCESS_DEFINITION_LIST) @AccessLogAnnotation(ignoreRequestArgs = "loginUser") public Result queryAllProcessDefinitionByProjectCode(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode) { Map<String, Object> result = processDefinitionService.queryAllProcessDefinitionByProjectCode(loginUser, projectCode); return returnDataList(result); } /** * import process definition * * @param loginUser login user * @param projectCode project code * @param file resource file * @return import result code */ @ApiOperation(value = "importProcessDefinition", notes = "IMPORT_PROCESS_DEFINITION_NOTES") @ApiImplicitParams({ @ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile") }) @PostMapping(value = "/import") @ApiException(IMPORT_PROCESS_DEFINE_ERROR) @AccessLogAnnotation(ignoreRequestArgs = {"loginUser", "file"}) public Result importProcessDefinition(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @ApiParam(name = "projectCode", value = "PROJECT_CODE", required = true) @PathVariable long projectCode, @RequestParam("file") MultipartFile file) { Map<String, Object> result = 
processDefinitionService.importProcessDefinition(loginUser, projectCode, file); return returnDataList(result); } }
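The controller above delegates create/update to ProcessDefinitionService, where issue 6284 below reports that multi-table writes do not roll back together. In Spring, the usual remedy is @Transactional on the service method, bearing in mind that the annotation only applies when the method is invoked through the Spring proxy (self-invocation silently bypasses it). A minimal sketch under these assumptions; the class and method names are illustrative, not the actual fix:

```java
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class DefinitionWriteSketch {

    // rollbackFor = Exception.class: by default Spring rolls back only on
    // unchecked exceptions, so a checked exception would otherwise commit.
    @Transactional(rollbackFor = Exception.class)
    public void saveDefinitionAndRelations() throws Exception {
        insertDefinition();  // first table write
        insertRelations();   // second table write; if it throws, the
                             // first write is rolled back as well
    }

    private void insertDefinition() { /* definition mapper insert */ }

    private void insertRelations() throws Exception { /* relation mapper batch insert */ }
}
```

If a declared transaction still does not fire, checking for self-invocation or an exception swallowed inside the method is usually the first step.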
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,284
[Bug] [API] Api transaction does not take effect
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened API transactions do not take effect, for example in the processDefinition create/update APIs ### What you expected to happen Transactional operations should work: if any step fails, all database writes in the operation are rolled back ### How to reproduce Make the second table-write step fail; the first table write is not rolled back ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6284
https://github.com/apache/dolphinscheduler/pull/6288
7e8febed4de0c27f67bf67811d2314762dce2733
d7160874e4f086fafff8675123cb8de7f46cffc1
"2021-09-21T04:24:18Z"
java
"2021-09-22T12:38:22Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.enums; import java.util.Locale; import org.springframework.context.i18n.LocaleContextHolder; /** * status enum // todo #4855 One category one interval */ public enum Status { SUCCESS(0, "success", "成功"), INTERNAL_SERVER_ERROR_ARGS(10000, "Internal Server Error: {0}", "服务端异常: {0}"), REQUEST_PARAMS_NOT_VALID_ERROR(10001, "request parameter {0} is not valid", "请求参数[{0}]无效"), TASK_TIMEOUT_PARAMS_ERROR(10002, "task timeout parameter is not valid", "任务超时参数无效"), USER_NAME_EXIST(10003, "user name already exists", "用户名已存在"), USER_NAME_NULL(10004, "user name is null", "用户名不能为空"), HDFS_OPERATION_ERROR(10006, "hdfs operation error", "hdfs操作错误"), TASK_INSTANCE_NOT_FOUND(10008, "task instance not found", "任务实例不存在"), OS_TENANT_CODE_EXIST(10009, "os tenant code {0} already exists", "操作系统租户[{0}]已存在"), USER_NOT_EXIST(10010, "user {0} not exists", "用户[{0}]不存在"), ALERT_GROUP_NOT_EXIST(10011, "alarm group not found", "告警组不存在"), ALERT_GROUP_EXIST(10012, "alarm group already exists", "告警组名称已存在"), USER_NAME_PASSWD_ERROR(10013, "user name or password error", "用户名或密码错误"), LOGIN_SESSION_FAILED(10014, "create session failed!", "创建session失败"), DATASOURCE_EXIST(10015, "data source name already exists", "数据源名称已存在"), DATASOURCE_CONNECT_FAILED(10016, "data source connection failed", "建立数据源连接失败"), TENANT_NOT_EXIST(10017, "tenant not exists", "租户不存在"), PROJECT_NOT_FOUNT(10018, "project {0} not found ", "项目[{0}]不存在"), PROJECT_ALREADY_EXISTS(10019, "project {0} already exists", "项目名称[{0}]已存在"), TASK_INSTANCE_NOT_EXISTS(10020, "task instance {0} does not exist", "任务实例[{0}]不存在"), TASK_INSTANCE_NOT_SUB_WORKFLOW_INSTANCE(10021, "task instance {0} is not sub process instance", "任务实例[{0}]不是子流程实例"), SCHEDULE_CRON_NOT_EXISTS(10022, "scheduler crontab {0} does not exist", "调度配置定时表达式[{0}]不存在"), SCHEDULE_CRON_ONLINE_FORBID_UPDATE(10023, "online status does not allow update operations", "调度配置上线状态不允许修改"), SCHEDULE_CRON_CHECK_FAILED(10024, "scheduler crontab expression validation failure: {0}", "调度配置定时表达式验证失败: {0}"), MASTER_NOT_EXISTS(10025, "master does not exist", "无可用master节点"), SCHEDULE_STATUS_UNKNOWN(10026, "unknown status: {0}", "未知状态: {0}"), CREATE_ALERT_GROUP_ERROR(10027, "create alert group error", "创建告警组错误"), QUERY_ALL_ALERTGROUP_ERROR(10028, "query all alertgroup error", "查询告警组错误"), LIST_PAGING_ALERT_GROUP_ERROR(10029, "list paging alert group error", "分页查询告警组错误"), UPDATE_ALERT_GROUP_ERROR(10030, "update alert group error", "更新告警组错误"), DELETE_ALERT_GROUP_ERROR(10031, "delete alert group error", "删除告警组错误"), ALERT_GROUP_GRANT_USER_ERROR(10032, "alert group grant user error", "告警组授权用户错误"), CREATE_DATASOURCE_ERROR(10033, "create datasource error", "创建数据源错误"), UPDATE_DATASOURCE_ERROR(10034, "update datasource error", "更新数据源错误"), 
QUERY_DATASOURCE_ERROR(10035, "query datasource error", "查询数据源错误"), CONNECT_DATASOURCE_FAILURE(10036, "connect datasource failure", "建立数据源连接失败"), CONNECTION_TEST_FAILURE(10037, "connection test failure", "测试数据源连接失败"), DELETE_DATA_SOURCE_FAILURE(10038, "delete data source failure", "删除数据源失败"), VERIFY_DATASOURCE_NAME_FAILURE(10039, "verify datasource name failure", "验证数据源名称失败"), UNAUTHORIZED_DATASOURCE(10040, "unauthorized datasource", "未经授权的数据源"), AUTHORIZED_DATA_SOURCE(10041, "authorized data source", "授权数据源失败"), LOGIN_SUCCESS(10042, "login success", "登录成功"), USER_LOGIN_FAILURE(10043, "user login failure", "用户登录失败"), LIST_WORKERS_ERROR(10044, "list workers error", "查询worker列表错误"), LIST_MASTERS_ERROR(10045, "list masters error", "查询master列表错误"), UPDATE_PROJECT_ERROR(10046, "update project error", "更新项目信息错误"), QUERY_PROJECT_DETAILS_BY_CODE_ERROR(10047, "query project details by code error", "查询项目详细信息错误"), CREATE_PROJECT_ERROR(10048, "create project error", "创建项目错误"), LOGIN_USER_QUERY_PROJECT_LIST_PAGING_ERROR(10049, "login user query project list paging error", "分页查询项目列表错误"), DELETE_PROJECT_ERROR(10050, "delete project error", "删除项目错误"), QUERY_UNAUTHORIZED_PROJECT_ERROR(10051, "query unauthorized project error", "查询未授权项目错误"), QUERY_AUTHORIZED_PROJECT(10052, "query authorized project", "查询授权项目错误"), QUERY_QUEUE_LIST_ERROR(10053, "query queue list error", "查询队列列表错误"), CREATE_RESOURCE_ERROR(10054, "create resource error", "创建资源错误"), UPDATE_RESOURCE_ERROR(10055, "update resource error", "更新资源错误"), QUERY_RESOURCES_LIST_ERROR(10056, "query resources list error", "查询资源列表错误"), QUERY_RESOURCES_LIST_PAGING(10057, "query resources list paging", "分页查询资源列表错误"), DELETE_RESOURCE_ERROR(10058, "delete resource error", "删除资源错误"), VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR(10059, "verify resource by name and type error", "资源名称或类型验证错误"), VIEW_RESOURCE_FILE_ON_LINE_ERROR(10060, "view resource file online error", "查看资源文件错误"), CREATE_RESOURCE_FILE_ON_LINE_ERROR(10061, "create resource file online error", "创建资源文件错误"), RESOURCE_FILE_IS_EMPTY(10062, "resource file is empty", "资源文件内容不能为空"), EDIT_RESOURCE_FILE_ON_LINE_ERROR(10063, "edit resource file online error", "更新资源文件错误"), DOWNLOAD_RESOURCE_FILE_ERROR(10064, "download resource file error", "下载资源文件错误"), CREATE_UDF_FUNCTION_ERROR(10065, "create udf function error", "创建UDF函数错误"), VIEW_UDF_FUNCTION_ERROR(10066, "view udf function error", "查询UDF函数错误"), UPDATE_UDF_FUNCTION_ERROR(10067, "update udf function error", "更新UDF函数错误"), QUERY_UDF_FUNCTION_LIST_PAGING_ERROR(10068, "query udf function list paging error", "分页查询UDF函数列表错误"), QUERY_DATASOURCE_BY_TYPE_ERROR(10069, "query datasource by type error", "查询数据源信息错误"), VERIFY_UDF_FUNCTION_NAME_ERROR(10070, "verify udf function name error", "UDF函数名称验证错误"), DELETE_UDF_FUNCTION_ERROR(10071, "delete udf function error", "删除UDF函数错误"), AUTHORIZED_FILE_RESOURCE_ERROR(10072, "authorized file resource error", "授权资源文件错误"), AUTHORIZE_RESOURCE_TREE(10073, "authorize resource tree display error", "授权资源目录树错误"), UNAUTHORIZED_UDF_FUNCTION_ERROR(10074, "unauthorized udf function error", "查询未授权UDF函数错误"), AUTHORIZED_UDF_FUNCTION_ERROR(10075, "authorized udf function error", "授权UDF函数错误"), CREATE_SCHEDULE_ERROR(10076, "create schedule error", "创建调度配置错误"), UPDATE_SCHEDULE_ERROR(10077, "update schedule error", "更新调度配置错误"), PUBLISH_SCHEDULE_ONLINE_ERROR(10078, "publish schedule online error", "上线调度配置错误"), OFFLINE_SCHEDULE_ERROR(10079, "offline schedule error", "下线调度配置错误"), QUERY_SCHEDULE_LIST_PAGING_ERROR(10080, "query schedule list paging error", 
"分页查询调度配置列表错误"), QUERY_SCHEDULE_LIST_ERROR(10081, "query schedule list error", "查询调度配置列表错误"), QUERY_TASK_LIST_PAGING_ERROR(10082, "query task list paging error", "分页查询任务列表错误"), QUERY_TASK_RECORD_LIST_PAGING_ERROR(10083, "query task record list paging error", "分页查询任务记录错误"), CREATE_TENANT_ERROR(10084, "create tenant error", "创建租户错误"), QUERY_TENANT_LIST_PAGING_ERROR(10085, "query tenant list paging error", "分页查询租户列表错误"), QUERY_TENANT_LIST_ERROR(10086, "query tenant list error", "查询租户列表错误"), UPDATE_TENANT_ERROR(10087, "update tenant error", "更新租户错误"), DELETE_TENANT_BY_ID_ERROR(10088, "delete tenant by id error", "删除租户错误"), VERIFY_OS_TENANT_CODE_ERROR(10089, "verify os tenant code error", "操作系统租户验证错误"), CREATE_USER_ERROR(10090, "create user error", "创建用户错误"), QUERY_USER_LIST_PAGING_ERROR(10091, "query user list paging error", "分页查询用户列表错误"), UPDATE_USER_ERROR(10092, "update user error", "更新用户错误"), DELETE_USER_BY_ID_ERROR(10093, "delete user by id error", "删除用户错误"), GRANT_PROJECT_ERROR(10094, "grant project error", "授权项目错误"), GRANT_RESOURCE_ERROR(10095, "grant resource error", "授权资源错误"), GRANT_UDF_FUNCTION_ERROR(10096, "grant udf function error", "授权UDF函数错误"), GRANT_DATASOURCE_ERROR(10097, "grant datasource error", "授权数据源错误"), GET_USER_INFO_ERROR(10098, "get user info error", "获取用户信息错误"), USER_LIST_ERROR(10099, "user list error", "查询用户列表错误"), VERIFY_USERNAME_ERROR(10100, "verify username error", "用户名验证错误"), UNAUTHORIZED_USER_ERROR(10101, "unauthorized user error", "查询未授权用户错误"), AUTHORIZED_USER_ERROR(10102, "authorized user error", "查询授权用户错误"), QUERY_TASK_INSTANCE_LOG_ERROR(10103, "view task instance log error", "查询任务实例日志错误"), DOWNLOAD_TASK_INSTANCE_LOG_FILE_ERROR(10104, "download task instance log file error", "下载任务日志文件错误"), CREATE_PROCESS_DEFINITION_ERROR(10105, "create process definition error", "创建工作流错误"), VERIFY_PROCESS_DEFINITION_NAME_UNIQUE_ERROR(10106, "verify process definition name unique error", "工作流定义名称验证错误"), UPDATE_PROCESS_DEFINITION_ERROR(10107, "update process definition error", "更新工作流定义错误"), RELEASE_PROCESS_DEFINITION_ERROR(10108, "release process definition error", "上线工作流错误"), QUERY_DATAIL_OF_PROCESS_DEFINITION_ERROR(10109, "query datail of process definition error", "查询工作流详细信息错误"), QUERY_PROCESS_DEFINITION_LIST(10110, "query process definition list", "查询工作流列表错误"), ENCAPSULATION_TREEVIEW_STRUCTURE_ERROR(10111, "encapsulation treeview structure error", "查询工作流树形图数据错误"), GET_TASKS_LIST_BY_PROCESS_DEFINITION_ID_ERROR(10112, "get tasks list by process definition id error", "查询工作流定义节点信息错误"), QUERY_PROCESS_INSTANCE_LIST_PAGING_ERROR(10113, "query process instance list paging error", "分页查询工作流实例列表错误"), QUERY_TASK_LIST_BY_PROCESS_INSTANCE_ID_ERROR(10114, "query task list by process instance id error", "查询任务实例列表错误"), UPDATE_PROCESS_INSTANCE_ERROR(10115, "update process instance error", "更新工作流实例错误"), QUERY_PROCESS_INSTANCE_BY_ID_ERROR(10116, "query process instance by id error", "查询工作流实例错误"), DELETE_PROCESS_INSTANCE_BY_ID_ERROR(10117, "delete process instance by id error", "删除工作流实例错误"), QUERY_SUB_PROCESS_INSTANCE_DETAIL_INFO_BY_TASK_ID_ERROR(10118, "query sub process instance detail info by task id error", "查询子流程任务实例错误"), QUERY_PARENT_PROCESS_INSTANCE_DETAIL_INFO_BY_SUB_PROCESS_INSTANCE_ID_ERROR(10119, "query parent process instance detail info by sub process instance id error", "查询子流程该工作流实例错误"), QUERY_PROCESS_INSTANCE_ALL_VARIABLES_ERROR(10120, "query process instance all variables error", "查询工作流自定义变量信息错误"), ENCAPSULATION_PROCESS_INSTANCE_GANTT_STRUCTURE_ERROR(10121, "encapsulation process 
instance gantt structure error", "查询工作流实例甘特图数据错误"), QUERY_PROCESS_DEFINITION_LIST_PAGING_ERROR(10122, "query process definition list paging error", "分页查询工作流定义列表错误"), SIGN_OUT_ERROR(10123, "sign out error", "退出错误"), OS_TENANT_CODE_HAS_ALREADY_EXISTS(10124, "os tenant code already exists", "操作系统租户已存在"), IP_IS_EMPTY(10125, "ip is empty", "IP地址不能为空"), SCHEDULE_CRON_REALEASE_NEED_NOT_CHANGE(10126, "schedule release is already {0}", "调度配置上线错误[{0}]"), CREATE_QUEUE_ERROR(10127, "create queue error", "创建队列错误"), QUEUE_NOT_EXIST(10128, "queue {0} does not exist", "队列ID[{0}]不存在"), QUEUE_VALUE_EXIST(10129, "queue value {0} already exists", "队列值[{0}]已存在"), QUEUE_NAME_EXIST(10130, "queue name {0} already exists", "队列名称[{0}]已存在"), UPDATE_QUEUE_ERROR(10131, "update queue error", "更新队列信息错误"), NEED_NOT_UPDATE_QUEUE(10132, "no content changes, no updates are required", "数据未变更,不需要更新队列信息"), VERIFY_QUEUE_ERROR(10133, "verify queue error", "验证队列信息错误"), NAME_NULL(10134, "name must not be null", "名称不能为空"), NAME_EXIST(10135, "name {0} already exists", "名称[{0}]已存在"), SAVE_ERROR(10136, "save error", "保存错误"), DELETE_PROJECT_ERROR_DEFINES_NOT_NULL(10137, "please delete the process definitions in project first!", "请先删除全部工作流定义"), BATCH_DELETE_PROCESS_INSTANCE_BY_IDS_ERROR(10138, "batch delete process instance by ids {0} error", "批量删除工作流实例错误"), PREVIEW_SCHEDULE_ERROR(10139, "preview schedule error", "预览调度配置错误"), PARSE_TO_CRON_EXPRESSION_ERROR(10140, "parse cron to cron expression error", "解析调度表达式错误"), SCHEDULE_START_TIME_END_TIME_SAME(10141, "The start time must not be the same as the end time", "开始时间不能和结束时间一样"), DELETE_TENANT_BY_ID_FAIL(10142, "delete tenant by id fail, for there are {0} process instances in execution using it", "删除租户失败,有[{0}]个运行中的工作流实例正在使用"), DELETE_TENANT_BY_ID_FAIL_DEFINES(10143, "delete tenant by id fail, for there are {0} process definitions using it", "删除租户失败,有[{0}]个工作流定义正在使用"), DELETE_TENANT_BY_ID_FAIL_USERS(10144, "delete tenant by id fail, for there are {0} users using it", "删除租户失败,有[{0}]个用户正在使用"), DELETE_WORKER_GROUP_BY_ID_FAIL(10145, "delete worker group by id fail, for there are {0} process instances in execution using it", "删除Worker分组失败,有[{0}]个运行中的工作流实例正在使用"), QUERY_WORKER_GROUP_FAIL(10146, "query worker group fail", "查询worker分组失败"), DELETE_WORKER_GROUP_FAIL(10147, "delete worker group fail", "删除worker分组失败"), USER_DISABLED(10148, "The current user is disabled", "当前用户已停用"), COPY_PROCESS_DEFINITION_ERROR(10149, "copy process definition from {0} to {1} error : {2}", "从{0}复制工作流到{1}错误 : {2}"), MOVE_PROCESS_DEFINITION_ERROR(10150, "move process definition from {0} to {1} error : {2}", "从{0}移动工作流到{1}错误 : {2}"), SWITCH_PROCESS_DEFINITION_VERSION_ERROR(10151, "Switch process definition version error", "切换工作流版本出错"), SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR(10152, "Switch process definition version error: process definition does not exist, [process definition id {0}]", "切换工作流版本出错:工作流不存在,[工作流id {0}]"), SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR(10153, "Switch process definition version error: process definition version does not exist, [process definition id {0}] [version number {1}]", "切换工作流版本出错:工作流版本信息不存在,[工作流id {0}] [版本号 {1}]"), QUERY_PROCESS_DEFINITION_VERSIONS_ERROR(10154, "query process definition versions error", "查询工作流历史版本信息出错"), DELETE_PROCESS_DEFINITION_VERSION_ERROR(10156, "delete process definition version error", "删除工作流历史版本出错"), QUERY_USER_CREATED_PROJECT_ERROR(10157, "query user created project error", "查询用户创建的项目错误"),
PROCESS_DEFINITION_CODES_IS_EMPTY(10158, "process definition codes is empty", "工作流CODES不能为空"), BATCH_COPY_PROCESS_DEFINITION_ERROR(10159, "batch copy process definition error", "复制工作流错误"), BATCH_MOVE_PROCESS_DEFINITION_ERROR(10160, "batch move process definition error", "移动工作流错误"), QUERY_WORKFLOW_LINEAGE_ERROR(10161, "query workflow lineage error", "查询血缘失败"), QUERY_AUTHORIZED_AND_USER_CREATED_PROJECT_ERROR(10162, "query authorized and user created project error", "查询授权的和用户创建的项目错误"), DELETE_PROCESS_DEFINITION_BY_CODE_FAIL(10163, "delete process definition by code fail, for there are {0} process instances in execution using it", "删除工作流定义失败,有[{0}]个运行中的工作流实例正在使用"), CHECK_OS_TENANT_CODE_ERROR(10164, "Please enter the English os tenant code", "请输入英文操作系统租户"), FORCE_TASK_SUCCESS_ERROR(10165, "force task success error", "强制成功任务实例错误"), TASK_INSTANCE_STATE_OPERATION_ERROR(10166, "the status of task instance {0} is {1}, cannot perform force success operation", "任务实例[{0}]的状态是[{1}],无法执行强制成功操作"), DATASOURCE_TYPE_NOT_EXIST(10167, "data source type does not exist", "数据源类型不存在"), PROCESS_DEFINITION_NAME_EXIST(10168, "process definition name {0} already exists", "工作流定义名称[{0}]已存在"), DATASOURCE_DB_TYPE_ILLEGAL(10169, "datasource type illegal", "数据源类型参数不合法"), DATASOURCE_PORT_ILLEGAL(10170, "datasource port illegal", "数据源端口参数不合法"), DATASOURCE_OTHER_PARAMS_ILLEGAL(10171, "datasource other params illegal", "数据源其他参数不合法"), DATASOURCE_NAME_ILLEGAL(10172, "datasource name illegal", "数据源名称不合法"), DATASOURCE_HOST_ILLEGAL(10173, "datasource host illegal", "数据源HOST不合法"), DELETE_WORKER_GROUP_NOT_EXIST(10174, "delete worker group not exist", "删除worker分组不存在"), CREATE_WORKER_GROUP_FORBIDDEN_IN_DOCKER(10175, "create worker group forbidden in docker", "创建worker分组在docker中禁止"), DELETE_WORKER_GROUP_FORBIDDEN_IN_DOCKER(10176, "delete worker group forbidden in docker", "删除worker分组在docker中禁止"), WORKER_ADDRESS_INVALID(10177, "worker address {0} invalid", "worker地址[{0}]无效"), QUERY_WORKER_ADDRESS_LIST_FAIL(10178, "query worker address list fail", "查询worker地址列表失败"), TRANSFORM_PROJECT_OWNERSHIP(10179, "Please transform project ownership [{0}]", "请先转移项目所有权[{0}]"), QUERY_ALERT_GROUP_ERROR(10180, "query alert group error", "查询告警组错误"), UDF_FUNCTION_NOT_EXIST(20001, "UDF function not found", "UDF函数不存在"), UDF_FUNCTION_EXISTS(20002, "UDF function already exists", "UDF函数已存在"), RESOURCE_NOT_EXIST(20004, "resource does not exist", "资源不存在"), RESOURCE_EXIST(20005, "resource already exists", "资源已存在"), RESOURCE_SUFFIX_NOT_SUPPORT_VIEW(20006, "resource suffix does not support online viewing", "资源文件后缀不支持查看"), RESOURCE_SIZE_EXCEED_LIMIT(20007, "upload resource file size exceeds limit", "上传资源文件大小超过限制"), RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified", "资源文件后缀不支持修改"), UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar", "UDF资源文件后缀名只支持[jar]"), HDFS_COPY_FAIL(20010, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"), RESOURCE_FILE_EXIST(20011, "resource file {0} already exists in hdfs, please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"), RESOURCE_FILE_NOT_EXIST(20012, "resource file {0} does not exist in hdfs!", "资源文件[{0}]在hdfs中不存在"), UDF_RESOURCE_IS_BOUND(20013, "udf resource file is bound by UDF functions:{0}", "udf函数绑定了资源文件[{0}]"), RESOURCE_IS_USED(20014, "resource file is used by process definition", "资源文件被上线的流程定义使用了"), PARENT_RESOURCE_NOT_EXIST(20015, "parent resource does not exist", "父资源文件不存在"), RESOURCE_NOT_EXIST_OR_NO_PERMISSION(20016, "resource does not exist or no permission, please view the
task node and remove error resource", "请检查任务节点并移除无权限或者已删除的资源"), RESOURCE_IS_AUTHORIZED(20017, "resource is authorized to user {0}, suffix not allowed to be modified", "资源文件已授权其他用户[{0}],后缀不允许修改"), USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"), USER_NO_OPERATION_PROJECT_PERM(30002, "user {0} does not have permission for project {1}", "当前用户[{0}]没有[{1}]项目的操作权限"), PROCESS_INSTANCE_NOT_EXIST(50001, "process instance {0} does not exist", "工作流实例[{0}]不存在"), PROCESS_INSTANCE_EXIST(50002, "process instance {0} already exists", "工作流实例[{0}]已存在"), PROCESS_DEFINE_NOT_EXIST(50003, "process definition {0} does not exist", "工作流定义[{0}]不存在"), PROCESS_DEFINE_NOT_RELEASE(50004, "process definition {0} is not online", "工作流定义[{0}]不是上线状态"), PROCESS_INSTANCE_ALREADY_CHANGED(50005, "the status of process instance {0} is already {1}", "工作流实例[{0}]的状态已经是[{1}]"), PROCESS_INSTANCE_STATE_OPERATION_ERROR(50006, "the status of process instance {0} is {1}, cannot perform {2} operation", "工作流实例[{0}]的状态是[{1}],无法执行[{2}]操作"), SUB_PROCESS_INSTANCE_NOT_EXIST(50007, "the sub process instance the task belongs to does not exist", "子工作流实例不存在"), PROCESS_DEFINE_NOT_ALLOWED_EDIT(50008, "process definition {0} does not allow edit", "工作流定义[{0}]不允许修改"), PROCESS_INSTANCE_EXECUTING_COMMAND(50009, "process instance {0} is executing the command, please wait ...", "工作流实例[{0}]正在执行命令,请稍等..."), PROCESS_INSTANCE_NOT_SUB_PROCESS_INSTANCE(50010, "process instance {0} is not sub process instance", "工作流实例[{0}]不是子工作流实例"), TASK_INSTANCE_STATE_COUNT_ERROR(50011, "task instance state count error", "查询各状态任务实例数错误"), COUNT_PROCESS_INSTANCE_STATE_ERROR(50012, "count process instance state error", "查询各状态流程实例数错误"), COUNT_PROCESS_DEFINITION_USER_ERROR(50013, "count process definition user error", "查询各用户流程定义数错误"), START_PROCESS_INSTANCE_ERROR(50014, "start process instance error", "运行工作流实例错误"), EXECUTE_PROCESS_INSTANCE_ERROR(50015, "execute process instance error", "操作工作流实例错误"), CHECK_PROCESS_DEFINITION_ERROR(50016, "check process definition error", "工作流定义错误"), QUERY_RECIPIENTS_AND_COPYERS_BY_PROCESS_DEFINITION_ERROR(50017, "query recipients and copyers by process definition error", "查询收件人和抄送人错误"), DATA_IS_NOT_VALID(50017, "data {0} not valid", "数据[{0}]无效"), DATA_IS_NULL(50018, "data {0} is null", "数据[{0}]不能为空"), PROCESS_NODE_HAS_CYCLE(50019, "process node has cycle", "流程节点间存在循环依赖"), PROCESS_NODE_S_PARAMETER_INVALID(50020, "process node {0} parameter invalid", "流程节点[{0}]参数无效"), PROCESS_DEFINE_STATE_ONLINE(50021, "process definition {0} is already online", "工作流定义[{0}]已上线"), DELETE_PROCESS_DEFINE_BY_CODE_ERROR(50022, "delete process definition by code error", "删除工作流定义错误"), SCHEDULE_CRON_STATE_ONLINE(50023, "the status of schedule {0} is already online", "调度配置[{0}]已上线"), DELETE_SCHEDULE_CRON_BY_ID_ERROR(50024, "delete schedule by id error", "删除调度配置错误"), BATCH_DELETE_PROCESS_DEFINE_ERROR(50025, "batch delete process definition error", "批量删除工作流定义错误"), BATCH_DELETE_PROCESS_DEFINE_BY_CODES_ERROR(50026, "batch delete process definition by codes {0} error", "批量删除工作流定义[{0}]错误"), TENANT_NOT_SUITABLE(50027, "there is no suitable tenant, please choose an available tenant.", "没有合适的租户,请选择可用的租户"), EXPORT_PROCESS_DEFINE_BY_ID_ERROR(50028, "export process definition by id error", "导出工作流定义错误"), BATCH_EXPORT_PROCESS_DEFINE_BY_IDS_ERROR(50028, "batch export process definition by ids error", "批量导出工作流定义错误"), IMPORT_PROCESS_DEFINE_ERROR(50029, "import process definition error", "导入工作流定义错误"), TASK_DEFINE_NOT_EXIST(50030, "task definition {0} does not exist",
"任务定义[{0}]不存在"), CREATE_PROCESS_TASK_RELATION_ERROR(50032, "create process task relation error", "创建工作流任务关系错误"), PROCESS_TASK_RELATION_NOT_EXIST(50033, "process task relation {0} does not exist", "工作流任务关系[{0}]不存在"), PROCESS_TASK_RELATION_EXIST(50034, "process task relation is already exist, processCode:[{0}]", "工作流任务关系已存在, processCode:[{0}]"), PROCESS_DAG_IS_EMPTY(50035, "process dag can not be empty", "工作流dag不能为空"), CHECK_PROCESS_TASK_RELATION_ERROR(50036, "check process task relation error", "工作流任务关系参数错误"), CREATE_TASK_DEFINITION_ERROR(50037, "create task definition error", "创建任务错误"), UPDATE_TASK_DEFINITION_ERROR(50038, "update task definition error", "更新任务定义错误"), QUERY_TASK_DEFINITION_VERSIONS_ERROR(50039, "query task definition versions error", "查询任务历史版本信息出错"), SWITCH_TASK_DEFINITION_VERSION_ERROR(50040, "Switch task definition version error", "切换任务版本出错"), DELETE_TASK_DEFINITION_VERSION_ERROR(50041, "delete task definition version error", "删除任务历史版本出错"), DELETE_TASK_DEFINE_BY_CODE_ERROR(50042, "delete task definition by code error", "删除任务定义错误"), QUERY_DETAIL_OF_TASK_DEFINITION_ERROR(50043, "query detail of task definition error", "查询任务详细信息错误"), QUERY_TASK_DEFINITION_LIST_PAGING_ERROR(50044, "query task definition list paging error", "分页查询任务定义列表错误"), TASK_DEFINITION_NAME_EXISTED(50045, "task definition name [{0}] already exists", "任务定义名称[{0}]已经存在"), HDFS_NOT_STARTUP(60001, "hdfs not startup", "hdfs未启用"), /** * for monitor */ QUERY_DATABASE_STATE_ERROR(70001, "query database state error", "查询数据库状态错误"), QUERY_ZOOKEEPER_STATE_ERROR(70002, "query zookeeper state error", "查询zookeeper状态错误"), CREATE_ACCESS_TOKEN_ERROR(70010, "create access token error", "创建访问token错误"), GENERATE_TOKEN_ERROR(70011, "generate token error", "生成token错误"), QUERY_ACCESSTOKEN_LIST_PAGING_ERROR(70012, "query access token list paging error", "分页查询访问token列表错误"), UPDATE_ACCESS_TOKEN_ERROR(70013, "update access token error", "更新访问token错误"), DELETE_ACCESS_TOKEN_ERROR(70014, "delete access token error", "删除访问token错误"), ACCESS_TOKEN_NOT_EXIST(70015, "access token not exist", "访问token不存在"), COMMAND_STATE_COUNT_ERROR(80001, "task instance state count error", "查询各状态任务实例数错误"), NEGTIVE_SIZE_NUMBER_ERROR(80002, "query size number error", "查询size错误"), START_TIME_BIGGER_THAN_END_TIME_ERROR(80003, "start time bigger than end time error", "开始时间在结束时间之后错误"), QUEUE_COUNT_ERROR(90001, "queue count error", "查询队列数据错误"), KERBEROS_STARTUP_STATE(100001, "get kerberos startup state error", "获取kerberos启动状态错误"), //plugin PLUGIN_NOT_A_UI_COMPONENT(110001, "query plugin error, this plugin has no UI component", "查询插件错误,此插件无UI组件"), QUERY_PLUGINS_RESULT_IS_NULL(110002, "query plugins result is null", "查询插件为空"), QUERY_PLUGINS_ERROR(110003, "query plugins error", "查询插件错误"), QUERY_PLUGIN_DETAIL_RESULT_IS_NULL(110004, "query plugin detail result is null", "查询插件详情结果为空"), UPDATE_ALERT_PLUGIN_INSTANCE_ERROR(110005, "update alert plugin instance error", "更新告警组和告警组插件实例错误"), DELETE_ALERT_PLUGIN_INSTANCE_ERROR(110006, "delete alert plugin instance error", "删除告警组和告警组插件实例错误"), GET_ALERT_PLUGIN_INSTANCE_ERROR(110007, "get alert plugin instance error", "获取告警组和告警组插件实例错误"), CREATE_ALERT_PLUGIN_INSTANCE_ERROR(110008, "create alert plugin instance error", "创建告警组和告警组插件实例错误"), QUERY_ALL_ALERT_PLUGIN_INSTANCE_ERROR(110009, "query all alert plugin instance error", "查询所有告警实例失败"), PLUGIN_INSTANCE_ALREADY_EXIT(110010, "plugin instance already exit", "该告警插件实例已存在"), LIST_PAGING_ALERT_PLUGIN_INSTANCE_ERROR(110011, "query plugin instance page error", "分页查询告警实例失败"), 
DELETE_ALERT_PLUGIN_INSTANCE_ERROR_HAS_ALERT_GROUP_ASSOCIATED(110012, "failed to delete the alert instance, there is an alarm group associated with this alert instance", "删除告警实例失败,存在与此告警实例关联的警报组"), PROCESS_DEFINITION_VERSION_IS_USED(110013, "this process definition version is used", "此工作流定义版本被使用"), CREATE_ENVIRONMENT_ERROR(120001, "create environment error", "创建环境失败"), ENVIRONMENT_NAME_EXISTS(120002, "this environment name [{0}] already exists", "环境名称[{0}]已经存在"), ENVIRONMENT_NAME_IS_NULL(120003, "this environment name shouldn't be empty.", "环境名称不能为空"), ENVIRONMENT_CONFIG_IS_NULL(120004, "this environment config shouldn't be empty.", "环境配置信息不能为空"), UPDATE_ENVIRONMENT_ERROR(120005, "update environment [{0}] info error", "更新环境[{0}]信息失败"), DELETE_ENVIRONMENT_ERROR(120006, "delete environment error", "删除环境信息失败"), DELETE_ENVIRONMENT_RELATED_TASK_EXISTS(120007, "this environment has been used in tasks, so you can't delete it.", "该环境已经被任务使用,所以不能删除该环境信息"), QUERY_ENVIRONMENT_BY_NAME_ERROR(120008, "environment [{0}] not found", "查询环境名称[{0}]信息不存在"), QUERY_ENVIRONMENT_BY_CODE_ERROR(120009, "environment [{0}] not found", "查询环境编码[{0}]不存在"), QUERY_ENVIRONMENT_ERROR(120010, "login user query environment error", "分页查询环境列表错误"), VERIFY_ENVIRONMENT_ERROR(120011, "verify environment error", "验证环境信息错误"), ENVIRONMENT_WORKER_GROUPS_IS_INVALID(120012, "environment worker groups format is invalid", "环境关联的工作组参数解析错误"), UPDATE_ENVIRONMENT_WORKER_GROUP_RELATION_ERROR(120013, "You can't modify the worker group, because the worker group [{0}] and this environment [{1}] are already used in the task [{2}]", "您不能修改工作组选项,因为该工作组 [{0}] 和 该环境 [{1}] 已经被用在任务 [{2}] 中"); private final int code; private final String enMsg; private final String zhMsg; Status(int code, String enMsg, String zhMsg) { this.code = code; this.enMsg = enMsg; this.zhMsg = zhMsg; } public int getCode() { return this.code; } public String getMsg() { if (Locale.SIMPLIFIED_CHINESE.getLanguage().equals(LocaleContextHolder.getLocale().getLanguage())) { return this.zhMsg; } else { return this.enMsg; } } }
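The bilingual lookup in getMsg() above keys off Spring's LocaleContextHolder, and the {0} tokens in the messages are java.text.MessageFormat patterns that callers fill in. A minimal usage sketch (the demo class is illustrative and not part of the snapshot; Status, LocaleContextHolder, and MessageFormat are the APIs it actually uses):

import java.text.MessageFormat;
import java.util.Locale;

import org.apache.dolphinscheduler.api.enums.Status;
import org.springframework.context.i18n.LocaleContextHolder;

public class StatusMessageDemo {
    public static void main(String[] args) {
        // getMsg() returns the Chinese text only when the context locale's language is "zh"
        LocaleContextHolder.setLocale(Locale.SIMPLIFIED_CHINESE);
        System.out.println(MessageFormat.format(Status.USER_NOT_EXIST.getMsg(), "alice")); // 用户[alice]不存在

        LocaleContextHolder.setLocale(Locale.US);
        System.out.println(MessageFormat.format(Status.USER_NOT_EXIST.getMsg(), "alice")); // user alice not exists
        System.out.println(Status.USER_NOT_EXIST.getCode()); // 10010
    }
}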
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,284
[Bug] [API] Api transaction does not take effect
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened API transactions do not take effect, for example in the processDefinition create/update APIs ### What you expected to happen Transactional operations should take effect, rolling back all writes when one step fails ### How to reproduce In an API method that writes to two tables, make the second table write fail; the first table write is not rolled back ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
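The reproduction above is the classic Spring declarative-transaction pitfall: @Transactional only rolls back when an exception (by default an unchecked one, or whatever rollbackFor names) propagates out of the annotated method. A failure reported through a return value, an exception swallowed by a catch block, or a self-invocation that bypasses the Spring proxy all leave the earlier write committed. A hedged sketch of that failure mode, with hypothetical names (DemoService, TableAMapper, TableBMapper, and DemoRecord are illustrative, not DolphinScheduler APIs):

import java.util.HashMap;
import java.util.Map;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class DemoService {
    // hypothetical MyBatis-style mappers standing in for the two table writes
    @Autowired private TableAMapper tableAMapper;
    @Autowired private TableBMapper tableBMapper;

    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> writeBoth(DemoRecord record) {
        Map<String, Object> result = new HashMap<>();
        tableAMapper.insert(record);             // step 1: first table write
        int rows = tableBMapper.insert(record);  // step 2: second table write
        if (rows == 0) {
            // Pitfall: reporting the failure through the result map means no
            // exception leaves the method, so Spring commits step 1 anyway.
            result.put("status", "FAILED");
            return result;
        }
        result.put("status", "SUCCESS");
        return result;
    }
}

// minimal stand-in types so the sketch compiles
interface TableAMapper { int insert(DemoRecord record); }
interface TableBMapper { int insert(DemoRecord record); }
class DemoRecord { }

Throwing a RuntimeException at the failure point instead, as createDagDefine and updateDagDefine do in the file below with ServiceException, lets Spring roll back the first write.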
https://github.com/apache/dolphinscheduler/issues/6284
https://github.com/apache/dolphinscheduler/pull/6288
7e8febed4de0c27f67bf67811d2314762dce2733
d7160874e4f086fafff8675123cb8de7f46cffc1
"2021-09-21T04:24:18Z"
java
"2021-09-22T12:38:22Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import org.apache.dolphinscheduler.api.dto.DagDataSchedule; import org.apache.dolphinscheduler.api.dto.treeview.Instance; import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.ProcessDefinitionService; import org.apache.dolphinscheduler.api.service.ProcessInstanceService; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.SchedulerService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.FileUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import 
org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.permission.PermissionCheck; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.commons.lang.StringUtils; import java.io.BufferedOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.collect.Lists; /** * process definition service impl */ @Service public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService { private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class); private static final String RELEASESTATE = "releaseState"; @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionLogMapper processDefinitionLogMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProcessInstanceService processInstanceService; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private ProcessService processService; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private SchedulerService schedulerService; @Autowired private TenantMapper tenantMapper; /** * create process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return create result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> createProcessDefinition(User loginUser, long projectCode, String name, String description, String globalParams, String locations, 
int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson); if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) { return checkTaskDefinitions; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } long processDefinitionCode; try { processDefinitionCode = SnowFlakeUtils.getInstance().nextId(); } catch (SnowFlakeException e) { putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS); return result; } ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description, globalParams, locations, timeout, loginUser.getId(), tenantId); return createDagDefine(loginUser, taskRelationList, processDefinition, taskDefinitionLogs); } private Map<String, Object> createDagDefine(User loginUser, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs); if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) { logger.info("The task has not changed, so skip"); } if (saveTaskResult == Constants.DEFINITION_FAILURE) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR); } int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); if (insertVersion == 0) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_PROCESS_DEFINITION_ERROR); } int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.CREATE_PROCESS_TASK_RELATION_ERROR); throw new ServiceException(Status.CREATE_PROCESS_TASK_RELATION_ERROR); } return result; } private Map<String, Object> checkTaskDefinitionList(List<TaskDefinitionLog> taskDefinitionLogs, String taskDefinitionJson) { Map<String, Object> result = new HashMap<>(); try { if (taskDefinitionLogs.isEmpty()) { logger.error("taskDefinitionJson invalid: {}", 
taskDefinitionJson); putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson); return result; } for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) { logger.error("task definition {} parameter invalid", taskDefinitionLog.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName()); return result; } } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } private Map<String, Object> checkTaskRelationList(List<ProcessTaskRelationLog> taskRelationList, String taskRelationJson, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); try { if (taskRelationList == null || taskRelationList.isEmpty()) { logger.error("task relation list is null or empty"); putMsg(result, Status.DATA_IS_NOT_VALID, taskRelationJson); return result; } List<ProcessTaskRelation> processTaskRelations = taskRelationList.stream() .map(processTaskRelationLog -> JSONUtils.parseObject(JSONUtils.toJsonString(processTaskRelationLog), ProcessTaskRelation.class)) .collect(Collectors.toList()); List<TaskNode> taskNodeList = processService.transformTask(processTaskRelations, taskDefinitionLogs); if (taskNodeList.size() != taskRelationList.size()) { Set<Long> postTaskCodes = taskRelationList.stream().map(ProcessTaskRelationLog::getPostTaskCode).collect(Collectors.toSet()); Set<Long> taskNodeCodes = taskNodeList.stream().map(TaskNode::getCode).collect(Collectors.toSet()); Collection<Long> codes = CollectionUtils.subtract(postTaskCodes, taskNodeCodes); if (CollectionUtils.isNotEmpty(codes)) { logger.error("the task code does not exist"); putMsg(result, Status.TASK_DEFINE_NOT_EXIST, StringUtils.join(codes, Constants.COMMA)); return result; } } if (graphHasCycle(taskNodeList)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the task relation json is normal for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { if (processTaskRelationLog.getPostTaskCode() == 0) { logger.error("the post_task_code or post_task_version can't be zero"); putMsg(result, Status.CHECK_PROCESS_TASK_RELATION_ERROR); return result; } } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * query process definition list * * @param loginUser login user * @param projectCode project code * @return definition list */ @Override public Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = resourceList.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * query process definition list paging * * @param loginUser login user * @param projectCode project code * @param searchVal search value * @param userId user id * @param pageNo page number * @param
pageSize page size * @return process definition page */ @Override public Result queryProcessDefinitionListPaging(User loginUser, long projectCode, String searchVal, Integer userId, Integer pageNo, Integer pageSize) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } Page<ProcessDefinition> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging( page, searchVal, userId, project.getCode(), isAdmin(loginUser)); List<ProcessDefinition> records = processDefinitionIPage.getRecords(); for (ProcessDefinition pd : records) { ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion()); User user = userMapper.selectById(processDefinitionLog.getOperator()); pd.setModifyBy(user.getUserName()); } processDefinitionIPage.setRecords(records); PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) processDefinitionIPage.getTotal()); pageInfo.setTotalList(processDefinitionIPage.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * query detail of process definition * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return process definition detail */ @Override public Map<String, Object> queryProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { Tenant tenant = tenantMapper.queryById(processDefinition.getTenantId()); if (tenant != null) { processDefinition.setTenantCode(tenant.getTenantCode()); } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } @Override public Map<String, Object> queryProcessDefinitionByName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, name); } else { DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } /** * update process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param code process definition code * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout 
timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return update result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> updateProcessDefinition(User loginUser, long projectCode, String name, long code, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson); if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) { return checkTaskDefinitions; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); // check process definition exists if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { // online can not permit edit putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName()); return result; } if (!name.equals(processDefinition.getName())) { // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } } ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class); processDefinition.set(projectCode, name, description, globalParams, locations, timeout, tenantId); return updateDagDefine(loginUser, taskRelationList, processDefinition, processDefinitionDeepCopy, taskDefinitionLogs); } private Map<String, Object> updateDagDefine(User loginUser, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, ProcessDefinition processDefinitionDeepCopy, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs); if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) { logger.info("The task has not changed, so skip"); } if (saveTaskResult == Constants.DEFINITION_FAILURE) { putMsg(result, Status.UPDATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_TASK_DEFINITION_ERROR); } int insertVersion; if (processDefinition.equals(processDefinitionDeepCopy)) { insertVersion = processDefinitionDeepCopy.getVersion(); } else { 
processDefinition.setUpdateTime(new Date()); insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); } if (insertVersion == 0) { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR); } int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR); } return result; } /** * verify process definition name unique * * @param loginUser login user * @param projectCode project code * @param name name * @return true if process definition name not exists, otherwise false */ @Override public Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name.trim()); if (processDefinition == null) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name.trim()); } return result; } /** * delete process definition by code * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return delete result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } // Determine if the login user is the owner of the process definition if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } // check process definition is already online if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, code); return result; } // check process instances is already running List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES); if (CollectionUtils.isNotEmpty(processInstances)) { putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_CODE_FAIL, processInstances.size()); return result; } // get the timing according to the process definition List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(code); if (!schedules.isEmpty() && schedules.size() > 1) { logger.warn("scheduler num is {},Greater than 1", schedules.size()); putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); return result; } else if (schedules.size() == 1) { 
Schedule schedule = schedules.get(0); if (schedule.getReleaseState() == ReleaseState.OFFLINE) { scheduleMapper.deleteById(schedule.getId()); } else if (schedule.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId()); return result; } } int delete = processDefinitionMapper.deleteById(processDefinition.getId()); int deleteRelation = processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode()); if ((delete & deleteRelation) == 0) { putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); } putMsg(result, Status.SUCCESS); return result; } /** * release process definition: online / offline * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param releaseState release state * @return release result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check state if (null == releaseState) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); switch (releaseState) { case ONLINE: // To check resources whether they are already cancel authorized or deleted String resourceIds = processDefinition.getResourceIds(); if (StringUtils.isNotBlank(resourceIds)) { Integer[] resourceIdArray = Arrays.stream(resourceIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new); PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger); try { permissionCheck.checkPermission(); } catch (Exception e) { logger.error(e.getMessage(), e); putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE); return result; } } processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); break; case OFFLINE: processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray( new long[]{processDefinition.getCode()} ); for (Schedule schedule : scheduleList) { logger.info("set schedule offline, project id: {}, schedule id: {}, process definition code: {}", project.getId(), schedule.getId(), code); // set status schedule.setReleaseState(ReleaseState.OFFLINE); scheduleMapper.updateById(schedule); schedulerService.deleteSchedule(project.getId(), schedule.getId()); } break; default: putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } putMsg(result, Status.SUCCESS); return result; } /** * batch export process definition by codes */ @Override public void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response) { if (StringUtils.isEmpty(codes)) { return; } Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if 
(result.get(Constants.STATUS) != Status.SUCCESS) { return; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); List<DagDataSchedule> dagDataSchedules = processDefinitionList.stream().map(this::exportProcessDagData).collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(dagDataSchedules)) { downloadProcessDefinitionFile(response, dagDataSchedules); } } /** * download the process definition file */ private void downloadProcessDefinitionFile(HttpServletResponse response, List<DagDataSchedule> dagDataSchedules) { response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE); BufferedOutputStream buff = null; ServletOutputStream out = null; try { out = response.getOutputStream(); buff = new BufferedOutputStream(out); buff.write(JSONUtils.toJsonString(dagDataSchedules).getBytes(StandardCharsets.UTF_8)); buff.flush(); buff.close(); } catch (IOException e) { logger.warn("export process fail", e); } finally { if (null != buff) { try { buff.close(); } catch (Exception e) { logger.warn("export process buffer not close", e); } } if (null != out) { try { out.close(); } catch (Exception e) { logger.warn("export process output stream not close", e); } } } } /** * get export process dag data * * @param processDefinition process definition * @return DagDataSchedule */ public DagDataSchedule exportProcessDagData(ProcessDefinition processDefinition) { List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(processDefinition.getCode()); DagDataSchedule dagDataSchedule = new DagDataSchedule(processService.genDagData(processDefinition)); if (!schedules.isEmpty()) { Schedule schedule = schedules.get(0); schedule.setReleaseState(ReleaseState.OFFLINE); dagDataSchedule.setSchedule(schedule); } return dagDataSchedule; } /** * import process definition * * @param loginUser login user * @param projectCode project code * @param file process metadata json file * @return import process */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> importProcessDefinition(User loginUser, long projectCode, MultipartFile file) { Map<String, Object> result = new HashMap<>(); String dagDataScheduleJson = FileUtils.file2String(file); List<DagDataSchedule> dagDataScheduleList = JSONUtils.toList(dagDataScheduleJson, DagDataSchedule.class); //check file content if (CollectionUtils.isEmpty(dagDataScheduleList)) { putMsg(result, Status.DATA_IS_NULL, "fileContent"); return result; } for (DagDataSchedule dagDataSchedule : dagDataScheduleList) { if (!checkAndImport(loginUser, projectCode, result, dagDataSchedule)) { return result; } } return result; } /** * check and import */ private boolean checkAndImport(User loginUser, long projectCode, Map<String, Object> result, DagDataSchedule dagDataSchedule) { if (!checkImportanceParams(dagDataSchedule, result)) { return false; } ProcessDefinition processDefinition = dagDataSchedule.getProcessDefinition(); //unique check Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, projectCode, processDefinition.getName()); if (Status.SUCCESS.equals(checkResult.get(Constants.STATUS))) { putMsg(result, Status.SUCCESS); } else { result.putAll(checkResult); return false; } String processDefinitionName = recursionProcessDefinitionName(projectCode, processDefinition.getName(), 1); processDefinition.setName(processDefinitionName + "_import_" + 
                DateUtils.getCurrentTimeStamp());
        processDefinition.setUserId(loginUser.getId());
        try {
            processDefinition.setCode(SnowFlakeUtils.getInstance().nextId());
        } catch (SnowFlakeException e) {
            putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR);
            return false;
        }
        List<TaskDefinition> taskDefinitionList = dagDataSchedule.getTaskDefinitionList();
        Map<Long, Long> taskCodeMap = new HashMap<>();
        Date now = new Date();
        List<TaskDefinitionLog> taskDefinitionLogList = new ArrayList<>();
        for (TaskDefinition taskDefinition : taskDefinitionList) {
            TaskDefinitionLog taskDefinitionLog = new TaskDefinitionLog(taskDefinition);
            taskDefinitionLog.setName(taskDefinitionLog.getName() + "_import_" + DateUtils.getCurrentTimeStamp());
            taskDefinitionLog.setProjectCode(projectCode);
            taskDefinitionLog.setUserId(loginUser.getId());
            taskDefinitionLog.setVersion(Constants.VERSION_FIRST);
            taskDefinitionLog.setCreateTime(now);
            taskDefinitionLog.setUpdateTime(now);
            taskDefinitionLog.setOperator(loginUser.getId());
            taskDefinitionLog.setOperateTime(now);
            try {
                long code = SnowFlakeUtils.getInstance().nextId();
                taskCodeMap.put(taskDefinitionLog.getCode(), code);
                taskDefinitionLog.setCode(code);
            } catch (SnowFlakeException e) {
                logger.error("Task code get error, ", e);
                putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code");
                return false;
            }
            taskDefinitionLogList.add(taskDefinitionLog);
        }
        int insert = taskDefinitionMapper.batchInsert(taskDefinitionLogList);
        int logInsert = taskDefinitionLogMapper.batchInsert(taskDefinitionLogList);
        if ((logInsert & insert) == 0) {
            putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
            throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR);
        }
        List<ProcessTaskRelation> taskRelationList = dagDataSchedule.getProcessTaskRelationList();
        List<ProcessTaskRelationLog> taskRelationLogList = new ArrayList<>();
        for (ProcessTaskRelation processTaskRelation : taskRelationList) {
            ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(processTaskRelation);
            processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode()));
            processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode()));
            processTaskRelationLog.setPreTaskVersion(Constants.VERSION_FIRST);
            processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST);
            taskRelationLogList.add(processTaskRelationLog);
        }
        Map<String, Object> createDagResult = createDagDefine(loginUser, taskRelationLogList, processDefinition, Lists.newArrayList());
        if (Status.SUCCESS.equals(createDagResult.get(Constants.STATUS))) {
            putMsg(createDagResult, Status.SUCCESS);
        } else {
            result.putAll(createDagResult);
            throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR);
        }
        Schedule schedule = dagDataSchedule.getSchedule();
        if (null != schedule) {
            ProcessDefinition newProcessDefinition = processDefinitionMapper.queryByCode(processDefinition.getCode());
            schedule.setProcessDefinitionCode(newProcessDefinition.getCode());
            schedule.setUserId(loginUser.getId());
            schedule.setCreateTime(now);
            schedule.setUpdateTime(now);
            int scheduleInsert = scheduleMapper.insert(schedule);
            if (0 == scheduleInsert) {
                putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
                throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR);
            }
        }
        return true;
    }

    /**
     * check importance params
     */
    private boolean checkImportanceParams(DagDataSchedule dagDataSchedule, Map<String, Object> result) {
        if (dagDataSchedule.getProcessDefinition() == null) {
            putMsg(result, Status.DATA_IS_NULL, "ProcessDefinition");
            return false;
        }
        if (CollectionUtils.isEmpty(dagDataSchedule.getTaskDefinitionList())) {
            putMsg(result, Status.DATA_IS_NULL, "TaskDefinitionList");
            return false;
        }
        if (CollectionUtils.isEmpty(dagDataSchedule.getProcessTaskRelationList())) {
            putMsg(result, Status.DATA_IS_NULL, "ProcessTaskRelationList");
            return false;
        }
        return true;
    }

    private String recursionProcessDefinitionName(long projectCode, String processDefinitionName, int num) {
        ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName);
        if (processDefinition != null) {
            if (num > 1) {
                String str = processDefinitionName.substring(0, processDefinitionName.length() - 3);
                processDefinitionName = str + "(" + num + ")";
            } else {
                processDefinitionName = processDefinition.getName() + "(" + num + ")";
            }
        } else {
            return processDefinitionName;
        }
        return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1);
    }

    /**
     * check the process task relation json
     *
     * @param processTaskRelationJson process task relation json
     * @return check result code
     */
    @Override
    public Map<String, Object> checkProcessNodeList(String processTaskRelationJson) {
        Map<String, Object> result = new HashMap<>();
        try {
            if (processTaskRelationJson == null) {
                logger.error("process data is null");
                putMsg(result, Status.DATA_IS_NOT_VALID, processTaskRelationJson);
                return result;
            }
            List<ProcessTaskRelation> taskRelationList = JSONUtils.toList(processTaskRelationJson, ProcessTaskRelation.class);
            // Check whether the task node is normal
            List<TaskNode> taskNodes = processService.transformTask(taskRelationList, Lists.newArrayList());
            if (CollectionUtils.isEmpty(taskNodes)) {
                logger.error("process node info is empty");
                putMsg(result, Status.PROCESS_DAG_IS_EMPTY);
                return result;
            }
            // check has cycle
            if (graphHasCycle(taskNodes)) {
                logger.error("process DAG has cycle");
                putMsg(result, Status.PROCESS_NODE_HAS_CYCLE);
                return result;
            }
            // check whether the process definition json is normal
            for (TaskNode taskNode : taskNodes) {
                if (!CheckUtils.checkTaskNodeParameters(taskNode)) {
                    logger.error("task node {} parameter invalid", taskNode.getName());
                    putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName());
                    return result;
                }
                // check extra params
                CheckUtils.checkOtherParams(taskNode.getExtras());
            }
            putMsg(result, Status.SUCCESS);
        } catch (Exception e) {
            result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
            result.put(Constants.MSG, e.getMessage());
        }
        return result;
    }

    /**
     * get task node details based on process definition
     *
     * @param loginUser loginUser
     * @param projectCode project code
     * @param code process definition code
     * @return task node list
     */
    @Override
    public Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null) {
            logger.info("process define not exists");
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        DagData dagData = processService.genDagData(processDefinition);
        result.put(Constants.DATA_LIST, dagData.getTaskDefinitionList());
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * get task node details map based on process definition
     *
     * @param loginUser loginUser
     * @param projectCode project code
     * @param codes define codes
     * @return task node list
     */
    @Override
    public Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode, String codes) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet());
        List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet);
        if (CollectionUtils.isEmpty(processDefinitionList)) {
            logger.info("process definition not exists");
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes);
            return result;
        }
        Map<Long, List<TaskDefinition>> taskNodeMap = new HashMap<>();
        for (ProcessDefinition processDefinition : processDefinitionList) {
            DagData dagData = processService.genDagData(processDefinition);
            taskNodeMap.put(processDefinition.getCode(), dagData.getTaskDefinitionList());
        }
        result.put(Constants.DATA_LIST, taskNodeMap);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process definition all by project code
     *
     * @param loginUser loginUser
     * @param projectCode project code
     * @return process definitions in the project
     */
    @Override
    public Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode);
        List<DagData> dagDataList = processDefinitions.stream().map(processService::genDagData).collect(Collectors.toList());
        result.put(Constants.DATA_LIST, dagDataList);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * Encapsulates the TreeView structure
     *
     * @param code process definition code
     * @param limit limit
     * @return tree view json data
     */
    @Override
    public Map<String, Object> viewTree(long code, Integer limit) {
        Map<String, Object> result = new HashMap<>();
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (null == processDefinition) {
            logger.info("process define not exists");
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition);
        // nodes that is running
        Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>();
        //nodes that is waiting to run
        Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>();
        // List of process instances
        List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(code, limit);
        processInstanceList.forEach(processInstance -> processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime())));
        List<TaskDefinitionLog> taskDefinitionList = processService.genTaskDefineList(processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()));
        Map<Long, TaskDefinitionLog> taskDefinitionMap = taskDefinitionList.stream()
            .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
        if (limit > processInstanceList.size()) {
            limit = processInstanceList.size();
        }
        TreeViewDto parentTreeViewDto = new TreeViewDto();
        parentTreeViewDto.setName("DAG");
        parentTreeViewDto.setType("");
        parentTreeViewDto.setCode(0L);
        // Specify the process definition, because it is a TreeView for a process definition
        for (int i = limit - 1; i >= 0; i--) {
            ProcessInstance processInstance = processInstanceList.get(i);
            Date endTime = processInstance.getEndTime() == null ? new Date() : processInstance.getEndTime();
            parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), processInstance.getProcessDefinitionCode(),
                "", processInstance.getState().toString(), processInstance.getStartTime(), endTime, processInstance.getHost(),
                DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime())));
        }
        List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>();
        parentTreeViewDtoList.add(parentTreeViewDto);
        // Here is the encapsulation task instance
        for (String startNode : dag.getBeginNode()) {
            runningNodeMap.put(startNode, parentTreeViewDtoList);
        }
        while (Stopper.isRunning()) {
            Set<String> postNodeList;
            Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator();
            while (iter.hasNext()) {
                Map.Entry<String, List<TreeViewDto>> en = iter.next();
                String nodeName = en.getKey();
                parentTreeViewDtoList = en.getValue();
                TreeViewDto treeViewDto = new TreeViewDto();
                treeViewDto.setName(nodeName);
                TaskNode taskNode = dag.getNode(nodeName);
                treeViewDto.setType(taskNode.getType());
                treeViewDto.setCode(taskNode.getCode());
                //set treeViewDto instances
                for (int i = limit - 1; i >= 0; i--) {
                    ProcessInstance processInstance = processInstanceList.get(i);
                    TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), nodeName);
                    if (taskInstance == null) {
                        treeViewDto.getInstances().add(new Instance(-1, "not running", 0, "null"));
                    } else {
                        Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime();
                        Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime();
                        int subProcessId = 0;
                        // if process is sub process, the return sub id, or sub id=0
                        if (taskInstance.isSubProcess()) {
                            TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode());
                            subProcessId = Integer.parseInt(JSONUtils.parseObject(
                                taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_ID).asText());
                        }
                        treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskCode(),
                            taskInstance.getTaskType(), taskInstance.getState().toString(), taskInstance.getStartTime(), taskInstance.getEndTime(),
                            taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId));
                    }
                }
                for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) {
                    pTreeViewDto.getChildren().add(treeViewDto);
                }
                postNodeList = dag.getSubsequentNodes(nodeName);
                if (CollectionUtils.isNotEmpty(postNodeList)) {
                    for (String nextNodeName : postNodeList) {
                        List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName);
                        if (CollectionUtils.isEmpty(treeViewDtoList)) {
                            treeViewDtoList = new ArrayList<>();
                        }
                        treeViewDtoList.add(treeViewDto);
                        waitingRunningNodeMap.put(nextNodeName, treeViewDtoList);
                    }
                }
                runningNodeMap.remove(nodeName);
            }
            if (waitingRunningNodeMap.size() == 0) {
                break;
            } else {
                runningNodeMap.putAll(waitingRunningNodeMap);
                waitingRunningNodeMap.clear();
            }
        }
        result.put(Constants.DATA_LIST, parentTreeViewDto);
        result.put(Constants.STATUS, Status.SUCCESS);
        result.put(Constants.MSG, Status.SUCCESS.getMsg());
        return result;
    }

    /**
     * whether the graph has a ring
     *
     * @param taskNodeResponseList task node response list
     * @return if graph has cycle flag
     */
    private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) {
        DAG<String, TaskNode, String> graph = new DAG<>();
        // Fill the vertices
        for (TaskNode taskNodeResponse : taskNodeResponseList) {
            graph.addNode(taskNodeResponse.getName(), taskNodeResponse);
        }
        // Fill edge relations
        for (TaskNode taskNodeResponse : taskNodeResponseList) {
            List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class);
            if (CollectionUtils.isNotEmpty(preTasks)) {
                for (String preTask : preTasks) {
                    if (!graph.addEdge(preTask, taskNodeResponse.getName())) {
                        return true;
                    }
                }
            }
        }
        return graph.hasCycle();
    }

    /**
     * batch copy process definition
     *
     * @param loginUser loginUser
     * @param projectCode projectCode
     * @param codes processDefinitionCodes
     * @param targetProjectCode targetProjectCode
     */
    @Override
    public Map<String, Object> batchCopyProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) {
        Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<String> failedProcessList = new ArrayList<>();
        doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, true);
        checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, true);
        return result;
    }

    /**
     * batch move process definition
     *
     * @param loginUser loginUser
     * @param projectCode projectCode
     * @param codes processDefinitionCodes
     * @param targetProjectCode targetProjectCode
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> batchMoveProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) {
        Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (projectCode == targetProjectCode) {
            return result;
        }
        List<String> failedProcessList = new ArrayList<>();
        doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, false);
        checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, false);
        return result;
    }

    private Map<String, Object> checkParams(User loginUser, long projectCode, String processDefinitionCodes, long targetProjectCode) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (StringUtils.isEmpty(processDefinitionCodes)) {
            putMsg(result, Status.PROCESS_DEFINITION_CODES_IS_EMPTY, processDefinitionCodes);
            return result;
        }
        if (projectCode != targetProjectCode) {
            Project targetProject = projectMapper.queryByCode(targetProjectCode);
            //check user access for project
            Map<String, Object> targetResult = projectService.checkProjectAndAuth(loginUser, targetProject, targetProjectCode);
            if (targetResult.get(Constants.STATUS) != Status.SUCCESS) {
                return targetResult;
            }
        }
        return result;
    }

    private void doBatchOperateProcessDefinition(User loginUser,
                                                 long targetProjectCode,
                                                 List<String> failedProcessList,
                                                 String processDefinitionCodes,
                                                 Map<String, Object> result,
                                                 boolean isCopy) {
        Set<Long> definitionCodes = Arrays.stream(processDefinitionCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet());
        List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(definitionCodes);
        Set<Long> queryCodes = processDefinitionList.stream().map(ProcessDefinition::getCode).collect(Collectors.toSet());
        // definitionCodes - queryCodes
        Set<Long> diffCode = definitionCodes.stream().filter(code -> !queryCodes.contains(code)).collect(Collectors.toSet());
        diffCode.forEach(code -> failedProcessList.add(code + "[null]"));
        for (ProcessDefinition processDefinition : processDefinitionList) {
            List<ProcessTaskRelation> processTaskRelations =
                processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
            List<ProcessTaskRelationLog> taskRelationList = processTaskRelations.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList());
            processDefinition.setProjectCode(targetProjectCode);
            if (isCopy) {
                processDefinition.setName(processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp());
                try {
                    result.putAll(createDagDefine(loginUser, taskRelationList, processDefinition, Lists.newArrayList()));
                } catch (Exception e) {
                    putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR);
                    throw new ServiceException(Status.COPY_PROCESS_DEFINITION_ERROR);
                }
            } else {
                try {
                    result.putAll(updateDagDefine(loginUser, taskRelationList, processDefinition, null, Lists.newArrayList()));
                } catch (Exception e) {
                    putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR);
                    throw new ServiceException(Status.MOVE_PROCESS_DEFINITION_ERROR);
                }
            }
            if (result.get(Constants.STATUS) != Status.SUCCESS) {
                failedProcessList.add(processDefinition.getCode() + "[" + processDefinition.getName() + "]");
            }
        }
    }

    /**
     * switch the defined process definition version
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @param version the version user want to switch
     * @return switch process definition version result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (Objects.isNull(processDefinition)) {
            putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR, code);
            return result;
        }
        ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(code, version);
        if (Objects.isNull(processDefinitionLog)) {
            putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR,
                processDefinition.getCode(), version);
            return result;
        }
        int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog);
        // a non-positive count means the switch did not persist, so fail and roll back
        if (switchVersion <= 0) {
            putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR);
            throw new ServiceException(Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR);
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * check batch operate result
     *
     * @param srcProjectCode srcProjectCode
     * @param targetProjectCode targetProjectCode
     * @param result result
     * @param failedProcessList failedProcessList
     * @param isCopy isCopy
     */
    private void checkBatchOperateResult(long srcProjectCode, long targetProjectCode,
                                         Map<String, Object> result, List<String> failedProcessList, boolean isCopy) {
        if (!failedProcessList.isEmpty()) {
            if (isCopy) {
                putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList));
            } else {
                putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList));
            }
        } else {
            putMsg(result, Status.SUCCESS);
        }
    }

    /**
     * query the pagination versions info by one certain process definition code
     *
     * @param loginUser login user info to check auth
     * @param projectCode project code
     * @param pageNo page number
     * @param pageSize page size
     * @param code process definition code
     * @return the pagination process definition versions info of the certain process definition
     */
    @Override
    public Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code) {
        Result result = new Result();
        Project project = projectMapper.queryByCode(projectCode);
        // check user access for project
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            putMsg(result, resultStatus);
            return result;
        }
        PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize);
        Page<ProcessDefinitionLog> page = new Page<>(pageNo, pageSize);
        IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, code);
        List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords();
        pageInfo.setTotalList(processDefinitionLogs);
        pageInfo.setTotal((int) processDefinitionVersionsPaging.getTotal());
        result.setData(pageInfo);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * delete one certain process definition by version number and process definition code
     *
     * @param loginUser login user info to check auth
     * @param projectCode project code
     * @param code process definition code
     * @param version version number
     * @return delete result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
        } else {
            int deleteLog = processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version);
            int deleteRelationLog = processTaskRelationLogMapper.deleteByCode(processDefinition.getCode(), processDefinition.getVersion());
            if ((deleteLog & deleteRelationLog) == 0) {
                putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
                throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
            }
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,284
[Bug] [API] Api transaction does not take effect
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

API transactions do not take effect, for example in the processDefinition create/update APIs.

### What you expected to happen

Transactional operations should work: when a later step fails, the earlier steps should be rolled back.

### How to reproduce

Make the second table-write step inside the transaction fail; the first table-write step is not rolled back.

### Anything else

_No response_

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
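One plausible mechanism behind this class of bug (a hedged sketch, not necessarily the exact root cause fixed in the linked PR): Spring's `@Transactional` rolls back only for unchecked exceptions by default, so a second write that fails with a checked exception, or that merely reports failure through a return value, leaves the first write committed. The `DemoService` below and its two insert helpers are hypothetical names used purely for illustration:

```java
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class DemoService {

    // Default settings: rollback happens only for RuntimeException/Error.
    // A checked exception from the second insert leaves the first one committed.
    @Transactional
    public void saveBothDefault() throws Exception {
        insertIntoTableA();   // stays committed if the next call fails
        insertIntoTableB();   // throws a checked exception
    }

    // Declaring rollbackFor widens rollback to checked exceptions as well.
    @Transactional(rollbackFor = Exception.class)
    public void saveBothWithRollbackFor() throws Exception {
        insertIntoTableA();
        insertIntoTableB();
    }

    private void insertIntoTableA() {
        // hypothetical first table write
    }

    private void insertIntoTableB() throws Exception {
        // hypothetical second table write that fails
        throw new Exception("simulated failure of the second table operation");
    }
}
```

This is also why the service methods in the files below throw `ServiceException` after a failed step: assuming `ServiceException` is unchecked, throwing it is the other way to make a default `@Transactional` roll back.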
https://github.com/apache/dolphinscheduler/issues/6284
https://github.com/apache/dolphinscheduler/pull/6288
7e8febed4de0c27f67bf67811d2314762dce2733
d7160874e4f086fafff8675123cb8de7f46cffc1
"2021-09-21T04:24:18Z"
java
"2021-09-22T12:38:22Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessInstanceServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import static org.apache.dolphinscheduler.common.Constants.DATA_LIST;
import static org.apache.dolphinscheduler.common.Constants.DEPENDENT_SPLIT;
import static org.apache.dolphinscheduler.common.Constants.GLOBAL_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.PROCESS_INSTANCE_STATE;
import static org.apache.dolphinscheduler.common.Constants.TASK_LIST;

import org.apache.dolphinscheduler.api.dto.gantt.GanttDto;
import org.apache.dolphinscheduler.api.dto.gantt.Task;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.service.LoggerService;
import org.apache.dolphinscheduler.api.service.ProcessDefinitionService;
import org.apache.dolphinscheduler.api.service.ProcessInstanceService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.api.service.UsersService;
import org.apache.dolphinscheduler.api.utils.CheckUtils;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.placeholder.BusinessTimeUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;

import org.apache.commons.lang.StringUtils;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;

/**
 * process instance service impl
 */
@Service
public class ProcessInstanceServiceImpl extends BaseServiceImpl implements ProcessInstanceService {

    public static final String TASK_TYPE = "taskType";
    public static final String LOCAL_PARAMS_LIST = "localParamsList";

    @Autowired
    ProjectMapper projectMapper;

    @Autowired
    ProjectService projectService;

    @Autowired
    ProcessService processService;

    @Autowired
    ProcessInstanceMapper processInstanceMapper;

    @Autowired
    ProcessDefinitionMapper processDefineMapper;

    @Autowired
    ProcessDefinitionService processDefinitionService;

    @Autowired
    ExecutorService execService;

    @Autowired
    TaskInstanceMapper taskInstanceMapper;

    @Autowired
    LoggerService loggerService;

    @Autowired
    ProcessDefinitionLogMapper processDefinitionLogMapper;

    @Autowired
    TaskDefinitionLogMapper taskDefinitionLogMapper;

    @Autowired
    UsersService usersService;

    @Autowired
    private TenantMapper tenantMapper;

    /**
     * return top n SUCCESS process instance order by running time which started between startTime and endTime
     */
    @Override
    public Map<String, Object> queryTopNLongestRunningProcessInstance(User loginUser, long projectCode, int size, String startTime, String endTime) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (0 > size) {
            putMsg(result, Status.NEGTIVE_SIZE_NUMBER_ERROR, size);
            return result;
        }
        if (Objects.isNull(startTime)) {
            putMsg(result, Status.DATA_IS_NULL, Constants.START_TIME);
            return result;
        }
        Date start = DateUtils.stringToDate(startTime);
        if (Objects.isNull(endTime)) {
            putMsg(result, Status.DATA_IS_NULL, Constants.END_TIME);
            return result;
        }
        Date end = DateUtils.stringToDate(endTime);
        if (start == null || end == null) {
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, Constants.START_END_DATE);
            return result;
        }
        if (start.getTime() > end.getTime()) {
            putMsg(result, Status.START_TIME_BIGGER_THAN_END_TIME_ERROR, startTime, endTime);
            return result;
        }
        List<ProcessInstance> processInstances = processInstanceMapper.queryTopNProcessInstance(size, start, end, ExecutionStatus.SUCCESS);
        result.put(DATA_LIST, processInstances);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process instance by id
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processId process instance id
     * @return process instance detail
     */
    @Override
    public Map<String, Object> queryProcessInstanceById(User loginUser, long projectCode, Integer processId) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processId);
        ProcessDefinition processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion());
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processId);
        } else {
            processInstance.setWarningGroupId(processDefinition.getWarningGroupId());
            processInstance.setLocations(processDefinition.getLocations());
            processInstance.setDagData(processService.genDagData(processDefinition));
            result.put(DATA_LIST, processInstance);
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }

    /**
     * paging query process instance list, filtering according to project, process definition, time range, keyword, process status
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processDefineCode process definition code
     * @param pageNo page number
     * @param pageSize page size
     * @param searchVal search value
     * @param stateType state type
     * @param host host
     * @param startDate start time
     * @param endDate end time
     * @return process instance list
     */
    @Override
    public Result queryProcessInstanceList(User loginUser, long projectCode, long processDefineCode,
                                           String startDate, String endDate,
                                           String searchVal, String executorName, ExecutionStatus stateType, String host,
                                           Integer pageNo, Integer pageSize) {
        Result result = new Result();
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        Status resultEnum = (Status) checkResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            putMsg(result, resultEnum);
            return result;
        }
        int[] statusArray = null;
        // filter by state
        if (stateType != null) {
            statusArray = new int[]{stateType.ordinal()};
        }
        Map<String, Object> checkAndParseDateResult = checkAndParseDateParameters(startDate, endDate);
        resultEnum = (Status) checkAndParseDateResult.get(Constants.STATUS);
        if (resultEnum != Status.SUCCESS) {
            putMsg(result, resultEnum);
            return result;
        }
        Date start = (Date) checkAndParseDateResult.get(Constants.START_TIME);
        Date end = (Date) checkAndParseDateResult.get(Constants.END_TIME);
        Page<ProcessInstance> page = new Page<>(pageNo, pageSize);
        PageInfo<ProcessInstance> pageInfo = new PageInfo<>(pageNo, pageSize);
        int executorId = usersService.getUserIdByName(executorName);
        IPage<ProcessInstance> processInstanceList = processInstanceMapper.queryProcessInstanceListPaging(
            page, project.getCode(), processDefineCode, searchVal, executorId, statusArray, host, start, end);
        List<ProcessInstance> processInstances = processInstanceList.getRecords();
        List<Integer> userIds = CollectionUtils.transformToList(processInstances, ProcessInstance::getExecutorId);
        Map<Integer, User> idToUserMap = CollectionUtils.collectionToMap(usersService.queryUser(userIds), User::getId);
        for (ProcessInstance processInstance : processInstances) {
            processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()));
            User executor = idToUserMap.get(processInstance.getExecutorId());
            if (null != executor) {
                processInstance.setExecutorName(executor.getUserName());
            }
        }
        pageInfo.setTotal((int) processInstanceList.getTotal());
        pageInfo.setTotalList(processInstances);
        result.setData(pageInfo);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query task list by process instance id
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processId process instance id
     * @return task list for the process instance
     * @throws IOException io exception
     */
    @Override
    public Map<String, Object> queryTaskListByProcessId(User loginUser, long projectCode, Integer processId) throws IOException {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processId);
        List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processId);
        addDependResultForTaskList(taskInstanceList);
        Map<String, Object> resultMap = new HashMap<>();
        resultMap.put(PROCESS_INSTANCE_STATE, processInstance.getState().toString());
        resultMap.put(TASK_LIST, taskInstanceList);
        result.put(DATA_LIST, resultMap);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * add dependent result for dependent task
     */
    private void addDependResultForTaskList(List<TaskInstance> taskInstanceList) throws IOException {
        for (TaskInstance taskInstance : taskInstanceList) {
            if (TaskType.DEPENDENT.getDesc().equalsIgnoreCase(taskInstance.getTaskType())) {
                Result<String> logResult = loggerService.queryLog(
                    taskInstance.getId(), Constants.LOG_QUERY_SKIP_LINE_NUMBER, Constants.LOG_QUERY_LIMIT);
                if (logResult.getCode() == Status.SUCCESS.ordinal()) {
                    String log = logResult.getData();
                    Map<String, DependResult> resultMap = parseLogForDependentResult(log);
                    taskInstance.setDependentResult(JSONUtils.toJsonString(resultMap));
                }
            }
        }
    }

    @Override
    public Map<String, DependResult> parseLogForDependentResult(String log) throws IOException {
        Map<String, DependResult> resultMap = new HashMap<>();
        if (StringUtils.isEmpty(log)) {
            return resultMap;
        }
        BufferedReader br = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(log.getBytes(
            StandardCharsets.UTF_8)), StandardCharsets.UTF_8));
        String line;
        while ((line = br.readLine()) != null) {
            if (line.contains(DEPENDENT_SPLIT)) {
                String[] tmpStringArray = line.split(":\\|\\|");
                if (tmpStringArray.length != 2) {
                    continue;
                }
                String dependResultString = tmpStringArray[1];
                String[] dependStringArray = dependResultString.split(",");
                if (dependStringArray.length != 2) {
                    continue;
                }
                String key = dependStringArray[0].trim();
                DependResult dependResult = DependResult.valueOf(dependStringArray[1].trim());
                resultMap.put(key, dependResult);
            }
        }
        return resultMap;
    }

    /**
     * query sub process instance detail info by task id
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param taskId task id
     * @return sub process instance detail
     */
    @Override
    public Map<String, Object> querySubProcessInstanceByTaskId(User loginUser, long projectCode, Integer taskId) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        TaskInstance taskInstance = processService.findTaskInstanceById(taskId);
        if (taskInstance == null) {
            putMsg(result, Status.TASK_INSTANCE_NOT_EXISTS, taskId);
            return result;
        }
        if (!taskInstance.isSubProcess()) {
            putMsg(result, Status.TASK_INSTANCE_NOT_SUB_WORKFLOW_INSTANCE, taskInstance.getName());
            return result;
        }
        ProcessInstance subWorkflowInstance = processService.findSubProcessInstance(
            taskInstance.getProcessInstanceId(), taskInstance.getId());
        if (subWorkflowInstance == null) {
            putMsg(result, Status.SUB_PROCESS_INSTANCE_NOT_EXIST, taskId);
            return result;
        }
        Map<String, Object> dataMap = new HashMap<>();
        dataMap.put(Constants.SUBPROCESS_INSTANCE_ID, subWorkflowInstance.getId());
        result.put(DATA_LIST, dataMap);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * update process instance
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param taskRelationJson process task relation json
     * @param taskDefinitionJson taskDefinitionJson
     * @param processInstanceId process instance id
     * @param scheduleTime schedule time
     * @param syncDefine sync define
     * @param globalParams global params
     * @param locations locations for nodes
     * @param timeout timeout
     * @param tenantCode tenantCode
     * @return update result code
     */
    @Transactional
    @Override
    public Map<String, Object> updateProcessInstance(User loginUser, long projectCode, Integer processInstanceId,
                                                     String taskRelationJson, String taskDefinitionJson, String scheduleTime, Boolean syncDefine,
                                                     String globalParams, String locations, int timeout, String tenantCode) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        //check process instance exists
        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
        if (processInstance == null) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
            return result;
        }
        //check process instance status
        if (!processInstance.getState().typeIsFinished()) {
            putMsg(result, Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR,
                processInstance.getName(), processInstance.getState().toString(), "update");
            return result;
        }
        setProcessInstance(processInstance, tenantCode, scheduleTime, globalParams, timeout);
        if (Boolean.TRUE.equals(syncDefine)) {
            List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class);
            if (taskDefinitionLogs.isEmpty()) {
                putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson);
                return result;
            }
            for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
                if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) {
                    putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName());
                    return result;
                }
            }
            int saveTaskResult = processService.saveTaskDefine(loginUser, projectCode, taskDefinitionLogs);
            if (saveTaskResult == Constants.DEFINITION_FAILURE) {
                putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
                return result;
            }
            ProcessDefinition processDefinition = processDefineMapper.queryByCode(processInstance.getProcessDefinitionCode());
            List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class);
            //check workflow json is valid
            result = processDefinitionService.checkProcessNodeList(taskRelationJson);
            if (result.get(Constants.STATUS) != Status.SUCCESS) {
                return result;
            }
            int tenantId = -1;
            if (!Constants.DEFAULT.equals(tenantCode)) {
                Tenant tenant = tenantMapper.queryByTenantCode(tenantCode);
                if (tenant == null) {
                    putMsg(result, Status.TENANT_NOT_EXIST);
                    return result;
                }
                tenantId = tenant.getId();
            }
            processDefinition.set(projectCode, processDefinition.getName(), processDefinition.getDescription(), globalParams, locations, timeout, tenantId);
            processDefinition.setUpdateTime(new Date());
            int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, false);
            if (insertVersion > 0) {
                int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(),
                    processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs);
                if (insertResult > 0) {
                    putMsg(result, Status.SUCCESS);
                    result.put(Constants.DATA_LIST, processDefinition);
                } else {
                    putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
                    return result;
                }
            } else {
                putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
                return result;
            }
            processInstance.setProcessDefinitionVersion(insertVersion);
        }
        int update = processService.updateProcessInstance(processInstance);
        if (update > 0) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.UPDATE_PROCESS_INSTANCE_ERROR);
        }
        return result;
    }

    /**
     * update process instance attributes
     */
    private void setProcessInstance(ProcessInstance processInstance, String tenantCode, String scheduleTime, String globalParams, int timeout) {
        Date schedule = processInstance.getScheduleTime();
        if (scheduleTime != null) {
            schedule = DateUtils.getScheduleDate(scheduleTime);
        }
        processInstance.setScheduleTime(schedule);
        List<Property> globalParamList = JSONUtils.toList(globalParams, Property.class);
        Map<String, String> globalParamMap = globalParamList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
        globalParams = ParameterUtils.curingGlobalParams(globalParamMap, globalParamList, processInstance.getCmdTypeIfComplement(), schedule);
        processInstance.setTimeout(timeout);
        processInstance.setTenantCode(tenantCode);
        processInstance.setGlobalParams(globalParams);
    }

    /**
     * query parent process instance detail info by sub process instance id
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param subId sub process id
     * @return parent instance detail
     */
    @Override
    public Map<String, Object> queryParentInstanceBySubId(User loginUser, long projectCode, Integer subId) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessInstance subInstance = processService.findProcessInstanceDetailById(subId);
        if (subInstance == null) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, subId);
            return result;
        }
        if (subInstance.getIsSubProcess() == Flag.NO) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_SUB_PROCESS_INSTANCE, subInstance.getName());
            return result;
        }
        ProcessInstance parentWorkflowInstance = processService.findParentProcessInstance(subId);
        if (parentWorkflowInstance == null) {
            putMsg(result, Status.SUB_PROCESS_INSTANCE_NOT_EXIST);
            return result;
        }
        Map<String, Object> dataMap = new HashMap<>();
        dataMap.put(Constants.PARENT_WORKFLOW_INSTANCE, parentWorkflowInstance.getId());
        result.put(DATA_LIST, dataMap);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * delete process instance by id, at the same time,delete task instance and their mapping relation data
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processInstanceId process instance id
     * @return delete result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> deleteProcessInstanceById(User loginUser, long projectCode, Integer processInstanceId) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
        if (null == processInstance) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
            return result;
        }
        try {
            processService.removeTaskLogFile(processInstanceId);
        } catch (Exception e) {
            // ignore failures when removing task log files so that deletion can continue
        }
        // delete database cascade
        int delete = processService.deleteWorkProcessInstanceById(processInstanceId);
        processService.deleteAllSubWorkProcessByParentId(processInstanceId);
        processService.deleteWorkProcessMapByParentId(processInstanceId);
        if (delete > 0) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.DELETE_PROCESS_INSTANCE_BY_ID_ERROR);
        }
        return result;
    }

    /**
     * view process instance variables
     *
     * @param processInstanceId process instance id
     * @return variables data
     */
    @Override
    public Map<String, Object> viewVariables(Integer processInstanceId) {
        Map<String, Object> result = new HashMap<>();
        ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);
        if (processInstance == null) {
            throw new RuntimeException("workflow instance is null");
        }
        Map<String, String> timeParams = BusinessTimeUtils
            .getBusinessTime(processInstance.getCmdTypeIfComplement(),
                processInstance.getScheduleTime());
        String userDefinedParams = processInstance.getGlobalParams();
        // global params
        List<Property> globalParams = new ArrayList<>();
        // global param string
        String globalParamStr = ParameterUtils.convertParameterPlaceholders(JSONUtils.toJsonString(globalParams), timeParams);
        globalParams = JSONUtils.toList(globalParamStr, Property.class);
        for (Property property : globalParams) {
            timeParams.put(property.getProp(), property.getValue());
        }
        if (userDefinedParams != null && userDefinedParams.length() > 0) {
            globalParams = JSONUtils.toList(userDefinedParams, Property.class);
        }
        Map<String, Map<String, Object>> localUserDefParams = getLocalParams(processInstance, timeParams);
        Map<String, Object> resultMap = new HashMap<>();
        resultMap.put(GLOBAL_PARAMS, globalParams);
        resultMap.put(LOCAL_PARAMS, localUserDefParams);
        result.put(DATA_LIST, resultMap);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * get local params
     */
    private Map<String, Map<String, Object>> getLocalParams(ProcessInstance processInstance, Map<String, String> timeParams) {
        Map<String, Map<String, Object>> localUserDefParams = new HashMap<>();
        List<TaskInstance> taskInstanceList = taskInstanceMapper.findValidTaskListByProcessId(processInstance.getId(), Flag.YES);
        for (TaskInstance taskInstance : taskInstanceList) {
            TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(
                taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion());
            String localParams = JSONUtils.getNodeString(taskDefinitionLog.getTaskParams(), LOCAL_PARAMS);
            if (!StringUtils.isEmpty(localParams)) {
                localParams = ParameterUtils.convertParameterPlaceholders(localParams, timeParams);
                List<Property> localParamsList = JSONUtils.toList(localParams, Property.class);
                Map<String, Object> localParamsMap = new HashMap<>();
                localParamsMap.put(TASK_TYPE, taskDefinitionLog.getTaskType());
                localParamsMap.put(LOCAL_PARAMS_LIST, localParamsList);
                if (CollectionUtils.isNotEmpty(localParamsList)) {
                    localUserDefParams.put(taskDefinitionLog.getName(), localParamsMap);
                }
            }
        }
        return localUserDefParams;
    }

    /**
     * encapsulation gantt structure
     *
     * @param processInstanceId process instance id
     * @return gantt tree data
     * @throws Exception exception when json parse
     */
    @Override
    public Map<String, Object> viewGantt(Integer processInstanceId) throws Exception {
        Map<String, Object> result = new HashMap<>();
        ProcessInstance processInstance = processInstanceMapper.queryDetailById(processInstanceId);
        if (processInstance == null) {
            throw new RuntimeException("workflow instance is null");
        }
        ProcessDefinition processDefinition = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(
            processInstance.getProcessDefinitionCode(),
            processInstance.getProcessDefinitionVersion()
        );
        GanttDto ganttDto = new GanttDto();
        DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition);
        //topological sort
        List<String> nodeList = dag.topologicalSort();
        ganttDto.setTaskNames(nodeList);
        List<Task> taskList = new ArrayList<>();
        for (String node : nodeList) {
            TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstanceId, node);
            if (taskInstance == null) {
                continue;
            }
            Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime();
            Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime();
            Task task = new Task();
            task.setTaskName(taskInstance.getName());
            task.getStartDate().add(startTime.getTime());
            task.getEndDate().add(endTime.getTime());
            task.setIsoStart(startTime);
            task.setIsoEnd(endTime);
            task.setStatus(taskInstance.getState().toString());
            task.setExecutionDate(taskInstance.getStartTime());
            task.setDuration(DateUtils.format2Readable(endTime.getTime() - startTime.getTime()));
            taskList.add(task);
        }
        ganttDto.setTasks(taskList);
        result.put(DATA_LIST, ganttDto);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process instance by processDefinitionCode and stateArray
     *
     * @param processDefinitionCode processDefinitionCode
     * @param states states array
     * @return process instance list
     */
    @Override
    public List<ProcessInstance> queryByProcessDefineCodeAndStatus(Long processDefinitionCode, int[] states) {
        return processInstanceMapper.queryByProcessDefineCodeAndStatus(processDefinitionCode, states);
    }

    /**
     * query process instance by processDefinitionCode
     *
     * @param processDefinitionCode processDefinitionCode
     * @param size size
     * @return process instance list
     */
    @Override
    public List<ProcessInstance> queryByProcessDefineCode(Long processDefinitionCode, int size) {
        return processInstanceMapper.queryByProcessDefineCode(processDefinitionCode, size);
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,284
[Bug] [API] Api transaction does not take effect
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

API transactions do not take effect, for example in the processDefinition create/update APIs.

### What you expected to happen

Transactional operations should work: when a later step fails, the earlier steps should be rolled back.

### How to reproduce

Make the second table-write step inside the transaction fail; the first table-write step is not rolled back.

### Anything else

_No response_

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
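A second common reason why `@Transactional` can appear to do nothing (again a general, hedged illustration rather than a diagnosis of this specific PR): Spring applies the annotation through a proxy, so calling an annotated method from another method of the same bean through `this` bypasses the proxy and no transaction is opened. All names below are hypothetical:

```java
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class SelfInvocationDemo {

    // Called from outside, this method runs without any transaction.
    public void outer() {
        // A plain this-call: the Spring proxy is bypassed,
        // so the @Transactional on inner() is never applied.
        inner();
    }

    @Transactional(rollbackFor = Exception.class)
    public void inner() {
        // database writes here are NOT transactional when reached via outer()
    }
}
```

Moving `inner()` into a separate bean, or invoking it through an injected self-reference, restores the transactional behavior.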
https://github.com/apache/dolphinscheduler/issues/6284
https://github.com/apache/dolphinscheduler/pull/6288
7e8febed4de0c27f67bf67811d2314762dce2733
d7160874e4f086fafff8675123cb8de7f46cffc1
"2021-09-21T04:24:18Z"
java
"2021-09-22T12:38:22Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/TaskDefinitionServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.TaskDefinitionService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; /** * task definition service impl */ @Service public class TaskDefinitionServiceImpl extends BaseServiceImpl implements TaskDefinitionService { private static final Logger logger = LoggerFactory.getLogger(TaskDefinitionServiceImpl.class); @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessService processService; @Autowired private UserMapper userMapper; /** * create task definition * * @param loginUser login user * @param projectCode project code * @param taskDefinitionJson task definition json */ @Transactional(rollbackFor = RuntimeException.class) 
    @Override
    public Map<String, Object> createTaskDefinition(User loginUser, long projectCode, String taskDefinitionJson) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class);
        if (taskDefinitionLogs.isEmpty()) {
            logger.error("taskDefinitionJson invalid: {}", taskDefinitionJson);
            putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson);
            return result;
        }
        for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
            if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) {
                logger.error("task definition {} parameter invalid", taskDefinitionLog.getName());
                putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName());
                return result;
            }
            TaskDefinition taskDefinition = taskDefinitionMapper.queryByName(projectCode, taskDefinitionLog.getName());
            if (taskDefinition != null) {
                logger.error("task definition name {} already exists", taskDefinitionLog.getName());
                putMsg(result, Status.TASK_DEFINITION_NAME_EXISTED, taskDefinitionLog.getName());
                return result;
            }
        }
        int saveTaskResult = processService.saveTaskDefine(loginUser, projectCode, taskDefinitionLogs);
        if (saveTaskResult == Constants.DEFINITION_FAILURE) {
            putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
            return result;
        }
        Map<String, Object> resData = new HashMap<>();
        resData.put("total", taskDefinitionLogs.size());
        resData.put("code", StringUtils.join(taskDefinitionLogs.stream().map(TaskDefinition::getCode).collect(Collectors.toList()), ","));
        putMsg(result, Status.SUCCESS);
        result.put(Constants.DATA_LIST, resData);
        return result;
    }

    /**
     * query task definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param taskName task name
     */
    @Override
    public Map<String, Object> queryTaskDefinitionByName(User loginUser, long projectCode, String taskName) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        TaskDefinition taskDefinition = taskDefinitionMapper.queryByName(project.getCode(), taskName);
        if (taskDefinition == null) {
            putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskName);
        } else {
            result.put(Constants.DATA_LIST, taskDefinition);
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }

    /**
     * delete task definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param taskCode task code
     */
    @Transactional(rollbackFor = RuntimeException.class)
    @Override
    public Map<String, Object> deleteTaskDefinitionByCode(User loginUser, long projectCode, long taskCode) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode);
        if (!processTaskRelationList.isEmpty()) {
            Set<Long> processDefinitionCodes = processTaskRelationList
                    .stream()
                    .map(ProcessTaskRelation::getProcessDefinitionCode)
                    .collect(Collectors.toSet());
            putMsg(result, Status.PROCESS_TASK_RELATION_EXIST, StringUtils.join(processDefinitionCodes, ","));
            return result;
        }
        int delete = taskDefinitionMapper.deleteByCode(taskCode);
        if (delete > 0) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.DELETE_TASK_DEFINE_BY_CODE_ERROR);
        }
        return result;
    }

    /**
     * update task definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param taskCode task code
     * @param taskDefinitionJsonObj task definition json object
     */
    @Transactional(rollbackFor = RuntimeException.class)
    @Override
    public Map<String, Object> updateTaskDefinition(User loginUser, long projectCode, long taskCode, String taskDefinitionJsonObj) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (processService.isTaskOnline(taskCode)) {
            putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE);
            return result;
        }
        TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode);
        if (taskDefinition == null) {
            putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskCode);
            return result;
        }
        TaskDefinitionLog taskDefinitionToUpdate = JSONUtils.parseObject(taskDefinitionJsonObj, TaskDefinitionLog.class);
        if (taskDefinitionToUpdate == null) {
            logger.error("taskDefinitionJson is not valid json");
            putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJsonObj);
            return result;
        }
        if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionToUpdate)) {
            logger.error("task definition {} parameter invalid", taskDefinitionToUpdate.getName());
            putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionToUpdate.getName());
            return result;
        }
        Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskCode);
        if (version == null || version == 0) {
            putMsg(result, Status.DATA_IS_NOT_VALID, taskCode);
            return result;
        }
        Date now = new Date();
        taskDefinitionToUpdate.setCode(taskCode);
        taskDefinitionToUpdate.setId(taskDefinition.getId());
        taskDefinitionToUpdate.setProjectCode(projectCode);
        taskDefinitionToUpdate.setUserId(taskDefinition.getUserId());
        taskDefinitionToUpdate.setVersion(version + 1);
        taskDefinitionToUpdate.setTaskType(taskDefinitionToUpdate.getTaskType().toUpperCase());
        taskDefinitionToUpdate.setResourceIds(processService.getResourceIds(taskDefinitionToUpdate));
        taskDefinitionToUpdate.setUpdateTime(now);
        int update = taskDefinitionMapper.updateById(taskDefinitionToUpdate);
        taskDefinitionToUpdate.setOperator(loginUser.getId());
        taskDefinitionToUpdate.setOperateTime(now);
        taskDefinitionToUpdate.setCreateTime(now);
        int insert = taskDefinitionLogMapper.insert(taskDefinitionToUpdate);
        // both the definition update and the log insert are expected to affect exactly one row each
        if ((update & insert) != 1) {
            putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
            return result;
        }
        result.put(Constants.DATA_LIST, taskCode);
        putMsg(result, Status.SUCCESS, update);
        return result;
    }

    /**
     * switch task definition version
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param taskCode task code
     * @param version the version user want to switch
     */
    @Override
    public Map<String, Object> switchVersion(User loginUser, long projectCode, long taskCode, int version) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (processService.isTaskOnline(taskCode)) {
            putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE);
            return result;
        }
        TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode);
        if (taskDefinition == null) {
            putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskCode);
            return result;
        }
        TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, version);
        taskDefinitionLog.setUserId(loginUser.getId());
        taskDefinitionLog.setUpdateTime(new Date());
        int switchVersion = taskDefinitionMapper.updateById(taskDefinitionLog);
        if (switchVersion > 0) {
            result.put(Constants.DATA_LIST, taskCode);
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.SWITCH_TASK_DEFINITION_VERSION_ERROR);
        }
        return result;
    }

    @Override
    public Result queryTaskDefinitionVersions(User loginUser, long projectCode, long taskCode, int pageNo, int pageSize) {
        Result result = new Result();
        Project project = projectMapper.queryByCode(projectCode);
        // check user access for project
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            putMsg(result, resultStatus);
            return result;
        }
        PageInfo<TaskDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize);
        Page<TaskDefinitionLog> page = new Page<>(pageNo, pageSize);
        IPage<TaskDefinitionLog> taskDefinitionVersionsPaging = taskDefinitionLogMapper.queryTaskDefinitionVersionsPaging(page, taskCode);
        List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionVersionsPaging.getRecords();
        pageInfo.setTotalList(taskDefinitionLogs);
        pageInfo.setTotal((int) taskDefinitionVersionsPaging.getTotal());
        result.setData(pageInfo);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    @Override
    public Map<String, Object> deleteByCodeAndVersion(User loginUser, long projectCode, long taskCode, int version) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode);
        if (taskDefinition == null) {
            putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskCode);
        } else {
            int delete = taskDefinitionLogMapper.deleteByCodeAndVersion(taskCode, version);
            if (delete > 0) {
                putMsg(result, Status.SUCCESS);
            } else {
                putMsg(result, Status.DELETE_TASK_DEFINITION_VERSION_ERROR);
            }
        }
        return result;
    }

    @Override
    public Map<String, Object> queryTaskDefinitionDetail(User loginUser, long projectCode, long taskCode) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        TaskDefinition taskDefinition = taskDefinitionMapper.queryByCode(taskCode);
        if (taskDefinition == null) {
            putMsg(result, Status.TASK_DEFINE_NOT_EXIST, taskCode);
        } else {
            result.put(Constants.DATA_LIST, taskDefinition);
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }

    @Override
    public Result queryTaskDefinitionListPaging(User loginUser,
                                                long projectCode,
                                                String taskType,
                                                String searchVal,
                                                Integer userId,
                                                Integer pageNo,
                                                Integer pageSize) {
        Result result = new Result();
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            putMsg(result, resultStatus);
            return result;
        }
        if (StringUtils.isNotBlank(taskType)) {
            taskType = taskType.toUpperCase();
        }
        Page<TaskDefinition> page = new Page<>(pageNo, pageSize);
        IPage<TaskDefinition> taskDefinitionIPage = taskDefinitionMapper.queryDefineListPaging(
                page, projectCode, taskType, searchVal, userId, isAdmin(loginUser));
        if (StringUtils.isNotBlank(taskType)) {
            List<TaskDefinition> records = taskDefinitionIPage.getRecords();
            for (TaskDefinition pd : records) {
                TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion());
                User user = userMapper.selectById(taskDefinitionLog.getOperator());
                pd.setModifyBy(user.getUserName());
            }
            taskDefinitionIPage.setRecords(records);
        }
        PageInfo<TaskDefinition> pageInfo = new PageInfo<>(pageNo, pageSize);
        pageInfo.setTotal((int) taskDefinitionIPage.getTotal());
        pageInfo.setTotalList(taskDefinitionIPage.getRecords());
        result.setData(pageInfo);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    @Override
    public Map<String, Object> genTaskCodeList(User loginUser, Integer genNum) {
        Map<String, Object> result = new HashMap<>();
        if (genNum == null || genNum < 1 || genNum > 100) {
            logger.error("the genNum must be greater than 0 and less than or equal to 100");
            putMsg(result, Status.DATA_IS_NOT_VALID, genNum);
            return result;
        }
        List<Long> taskCodes = new ArrayList<>();
        try {
            for (int i = 0; i < genNum; i++) {
                taskCodes.add(SnowFlakeUtils.getInstance().nextId());
            }
        } catch (SnowFlakeException e) {
            logger.error("Task code get error, ", e);
            putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code");
        }
        putMsg(result, Status.SUCCESS);
        // return the generated task definition codes
        result.put(Constants.DATA_LIST, taskCodes);
        return result;
    }
}
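A minimal usage sketch of the fragment above (an assumption about typical call order, not code from the repository): callers are expected to reserve codes via genTaskCodeList first and embed them in the JSON passed to createTaskDefinition. The variables taskDefinitionService, loginUser and projectCode are assumed to exist, and the JSON payload is illustrative only — real payloads carry the full task definition fields.

// Reserve one snowflake code, then create a definition that uses it.
@SuppressWarnings("unchecked")
List<Long> codes = (List<Long>) taskDefinitionService.genTaskCodeList(loginUser, 1).get(Constants.DATA_LIST);
String taskDefinitionJson = "[{\"code\":" + codes.get(0) + ",\"name\":\"shell1\",\"taskType\":\"SHELL\",\"taskParams\":\"{}\"}]";
Map<String, Object> created = taskDefinitionService.createTaskDefinition(loginUser, projectCode, taskDefinitionJson);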
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6284
[Bug] [API] Api transaction does not take effect
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

API transactions do not take effect, e.g. in the processDefinition create/update APIs.

### What you expected to happen

Transactional operations should work.

### How to reproduce

Make the second table write fail: the first table write is not rolled back.

### Anything else

_No response_

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
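Two classic reasons a Spring `@Transactional` method silently runs without effective transaction semantics are self-invocation, which bypasses the Spring proxy, and a `rollbackFor` narrower than the exceptions actually thrown — note that the service methods above declare `@Transactional(rollbackFor = RuntimeException.class)`, so a checked exception would not trigger a rollback. Below is a hedged sketch of the self-invocation failure mode; the class and mapper names are hypothetical stand-ins, not the actual DolphinScheduler call sites.

import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

// Hypothetical mappers standing in for the real MyBatis mappers.
interface DefinitionMapper { int insert(Object definition); }
interface RelationMapper { int insert(Object relation); }

@Service
public class DefinitionService {

    private final DefinitionMapper definitionMapper;
    private final RelationMapper relationMapper;

    public DefinitionService(DefinitionMapper definitionMapper, RelationMapper relationMapper) {
        this.definitionMapper = definitionMapper;
        this.relationMapper = relationMapper;
    }

    @Transactional(rollbackFor = Exception.class)
    public void saveDefinitionAndRelation(Object definition, Object relation) {
        definitionMapper.insert(definition); // first table write
        relationMapper.insert(relation);     // second table write; a failure here should undo the first
    }

    public void createViaSelfInvocation(Object definition, Object relation) {
        // Self-invocation goes through `this`, not the Spring proxy, so the
        // @Transactional advice never runs: if the second insert fails, the
        // first one is NOT rolled back - the symptom reported in this issue.
        saveDefinitionAndRelation(definition, relation);
    }
}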
https://github.com/apache/dolphinscheduler/issues/6284
https://github.com/apache/dolphinscheduler/pull/6288
7e8febed4de0c27f67bf67811d2314762dce2733
d7160874e4f086fafff8675123cb8de7f46cffc1
"2021-09-21T04:24:18Z"
java
"2021-09-22T12:38:22Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionServiceTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service;

import static org.powermock.api.mockito.PowerMockito.mock;

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.impl.ProcessDefinitionServiceImpl;
import org.apache.dolphinscheduler.api.service.impl.ProjectServiceImpl;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.dao.entity.DagData;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;

import org.apache.commons.lang.StringUtils;

import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import javax.servlet.http.HttpServletResponse;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;

import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.google.common.collect.Lists;

/**
 * process definition service test
 */
@RunWith(MockitoJUnitRunner.class)
public class ProcessDefinitionServiceTest {

    private static final String taskRelationJson = "[{\"name\":\"\",\"preTaskCode\":0,\"preTaskVersion\":0,\"postTaskCode\":123456789,"
            + "\"postTaskVersion\":1,\"conditionType\":0,\"conditionParams\":\"{}\"},{\"name\":\"\",\"preTaskCode\":123456789,"
            + "\"preTaskVersion\":1,\"postTaskCode\":123451234,\"postTaskVersion\":1,\"conditionType\":0,\"conditionParams\":\"{}\"}]";

    @InjectMocks
    private ProcessDefinitionServiceImpl processDefinitionService;

    @Mock
    private ProcessDefinitionMapper processDefineMapper;

    @Mock
    private ProcessTaskRelationMapper processTaskRelationMapper;

    @Mock
    private ProjectMapper projectMapper;

    @Mock
    private ProjectServiceImpl projectService;

    @Mock
    private ScheduleMapper scheduleMapper;

    @Mock
    private ProcessService processService;

    @Mock
    private ProcessInstanceService processInstanceService;

    @Mock
    private TaskInstanceMapper taskInstanceMapper;

    @Mock
    private TenantMapper tenantMapper;

    @Mock
    private DataSourceMapper dataSourceMapper;

    @Test
    public void testQueryProcessDefinitionList() {
        long projectCode = 1L;
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Project project = getProject(projectCode);

        User loginUser = new User();
        loginUser.setId(-1);
        loginUser.setUserType(UserType.GENERAL_USER);

        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);

        //project not found
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> map = processDefinitionService.queryProcessDefinitionList(loginUser, projectCode);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));

        //project check auth success
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        List<ProcessDefinition> resourceList = new ArrayList<>();
        resourceList.add(getProcessDefinition());
        Mockito.when(processDefineMapper.queryAllDefinitionList(project.getCode())).thenReturn(resourceList);
        Map<String, Object> checkSuccessRes = processDefinitionService.queryProcessDefinitionList(loginUser, projectCode);
        Assert.assertEquals(Status.SUCCESS, checkSuccessRes.get(Constants.STATUS));
    }

    @Test
    @SuppressWarnings("unchecked")
    public void testQueryProcessDefinitionListPaging() {
        long projectCode = 1L;
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Project project = getProject(projectCode);

        User loginUser = new User();
        loginUser.setId(-1);
        loginUser.setUserType(UserType.GENERAL_USER);

        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);

        //project not found
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Result map = processDefinitionService.queryProcessDefinitionListPaging(loginUser, projectCode, "", 1, 5, 0);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT.getCode(), (int) map.getCode());

        putMsg(result, Status.SUCCESS, projectCode);
        loginUser.setId(1);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Page<ProcessDefinition> page = new Page<>(1, 10);
        page.setTotal(30);
        Mockito.when(processDefineMapper.queryDefineListPaging(
                Mockito.any(IPage.class),
                Mockito.eq(""),
                Mockito.eq(loginUser.getId()),
                Mockito.eq(project.getCode()),
                Mockito.anyBoolean())).thenReturn(page);

        Result map1 = processDefinitionService.queryProcessDefinitionListPaging(
                loginUser, 1L, "", 1, 10, loginUser.getId());
        Assert.assertEquals(Status.SUCCESS.getMsg(), map1.getMsg());
    }

    @Test
    public void testQueryProcessDefinitionByCode() {
        long projectCode = 1L;
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Project project = getProject(projectCode);

        User loginUser = new User();
        loginUser.setId(-1);
        loginUser.setUserType(UserType.GENERAL_USER);

        Tenant tenant = new Tenant();
        tenant.setId(1);
        tenant.setTenantCode("root");

        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);

        //project check auth fail
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> map = processDefinitionService.queryProcessDefinitionByCode(loginUser, 1L, 1L);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));

        //project check auth success, instance not exist
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        DagData dagData = new DagData(getProcessDefinition(), null, null);
        Mockito.when(processService.genDagData(Mockito.any())).thenReturn(dagData);

        Map<String, Object> instanceNotExistRes = processDefinitionService.queryProcessDefinitionByCode(loginUser, projectCode, 1L);
        Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, instanceNotExistRes.get(Constants.STATUS));

        //instance exists
        Mockito.when(processDefineMapper.queryByCode(46L)).thenReturn(getProcessDefinition());
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Mockito.when(tenantMapper.queryById(1)).thenReturn(tenant);
        Map<String, Object> successRes = processDefinitionService.queryProcessDefinitionByCode(loginUser, projectCode, 46L);
        Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
    }

    @Test
    public void testQueryProcessDefinitionByName() {
        long projectCode = 1L;
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Project project = getProject(projectCode);

        User loginUser = new User();
        loginUser.setId(-1);
        loginUser.setUserType(UserType.GENERAL_USER);

        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);

        //project check auth fail
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> map = processDefinitionService.queryProcessDefinitionByName(loginUser, projectCode, "test_def");
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));

        //project check auth success, instance not exist
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Mockito.when(processDefineMapper.queryByDefineName(project.getCode(), "test_def")).thenReturn(null);

        Map<String, Object> instanceNotExistRes = processDefinitionService.queryProcessDefinitionByName(loginUser, projectCode, "test_def");
        Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, instanceNotExistRes.get(Constants.STATUS));

        //instance exists
        Mockito.when(processDefineMapper.queryByDefineName(project.getCode(), "test")).thenReturn(getProcessDefinition());
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> successRes = processDefinitionService.queryProcessDefinitionByName(loginUser, projectCode, "test");
        Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
    }

    @Test
    public void testBatchCopyProcessDefinition() {
        long projectCode = 1L;
        Project project = getProject(projectCode);
        User loginUser = new User();
        loginUser.setId(1);
        loginUser.setUserType(UserType.GENERAL_USER);

        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);

        // copy project definition ids empty test
        Map<String, Object> map = processDefinitionService.batchCopyProcessDefinition(loginUser, projectCode, StringUtils.EMPTY, 2L);
        Assert.assertEquals(Status.PROCESS_DEFINITION_CODES_IS_EMPTY, map.get(Constants.STATUS));

        // project check auth fail
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> map1 = processDefinitionService.batchCopyProcessDefinition(
                loginUser, projectCode, String.valueOf(project.getId()), 2L);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map1.get(Constants.STATUS));

        // project check auth success, target project name not equal project name, check auth target project fail
        projectCode = 2L;
        Project project1 = getProject(projectCode);
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(project1);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);

        putMsg(result, Status.SUCCESS, projectCode);
        ProcessDefinition definition = getProcessDefinition();
        List<ProcessDefinition> processDefinitionList = new ArrayList<>();
        processDefinitionList.add(definition);
        Set<Long> definitionCodes = Arrays.stream("46".split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet());
        Mockito.when(processDefineMapper.queryByCodes(definitionCodes)).thenReturn(processDefinitionList);
        Mockito.when(processService.saveProcessDefine(loginUser, definition, true)).thenReturn(2);
        Map<String, Object> map3 = processDefinitionService.batchCopyProcessDefinition(
                loginUser, projectCode, "46", 1L);
        Assert.assertEquals(Status.SUCCESS, map3.get(Constants.STATUS));
    }

    @Test
    public void testBatchMoveProcessDefinition() {
        long projectCode = 1L;
        Project project1 = getProject(projectCode);
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(project1);

        long projectCode2 = 2L;
        Project project2 = getProject(projectCode2);
        Mockito.when(projectMapper.queryByCode(projectCode2)).thenReturn(project2);

        User loginUser = new User();
        loginUser.setId(-1);
        loginUser.setUserType(UserType.GENERAL_USER);

        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project1, projectCode)).thenReturn(result);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project2, projectCode2)).thenReturn(result);

        ProcessDefinition definition = getProcessDefinition();
        definition.setVersion(1);
        List<ProcessDefinition> processDefinitionList = new ArrayList<>();
        processDefinitionList.add(definition);
        Set<Long> definitionCodes = Arrays.stream("46".split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet());
        Mockito.when(processDefineMapper.queryByCodes(definitionCodes)).thenReturn(processDefinitionList);
        Mockito.when(processService.saveProcessDefine(loginUser, definition, true)).thenReturn(2);
        Mockito.when(processTaskRelationMapper.queryByProcessCode(projectCode, 46L)).thenReturn(getProcessTaskRelation(projectCode));
        putMsg(result, Status.SUCCESS);

        Map<String, Object> successRes = processDefinitionService.batchMoveProcessDefinition(
                loginUser, projectCode, "46", projectCode2);
        Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
    }

    @Test
    public void deleteProcessDefinitionByCodeTest() {
        long projectCode = 1L;
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Project project = getProject(projectCode);

        User loginUser = new User();
        loginUser.setId(-1);
        loginUser.setUserType(UserType.GENERAL_USER);

        //project check auth fail
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> map = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, 6L);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));

        //project check auth success, instance not exist
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Mockito.when(processDefineMapper.queryByCode(1L)).thenReturn(null);
        Map<String, Object> instanceNotExistRes = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, 1L);
        Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, instanceNotExistRes.get(Constants.STATUS));

        ProcessDefinition processDefinition = getProcessDefinition();
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        //user no auth
        loginUser.setUserType(UserType.GENERAL_USER);
        Mockito.when(processDefineMapper.queryByCode(46L)).thenReturn(processDefinition);
        Map<String, Object> userNoAuthRes = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, 46L);
        Assert.assertEquals(Status.USER_NO_OPERATION_PERM, userNoAuthRes.get(Constants.STATUS));

        //process definition online
        loginUser.setUserType(UserType.ADMIN_USER);
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        processDefinition.setReleaseState(ReleaseState.ONLINE);
        Mockito.when(processDefineMapper.queryByCode(46L)).thenReturn(processDefinition);
        Map<String, Object> dfOnlineRes = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, 46L);
        Assert.assertEquals(Status.PROCESS_DEFINE_STATE_ONLINE, dfOnlineRes.get(Constants.STATUS));

        //scheduler list elements > 1
        processDefinition.setReleaseState(ReleaseState.OFFLINE);
        Mockito.when(processDefineMapper.queryByCode(46L)).thenReturn(processDefinition);
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        List<Schedule> schedules = new ArrayList<>();
        schedules.add(getSchedule());
        schedules.add(getSchedule());
        Mockito.when(scheduleMapper.queryByProcessDefinitionCode(46L)).thenReturn(schedules);
        Map<String, Object> schedulerGreaterThanOneRes = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, 46L);
        Assert.assertEquals(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR, schedulerGreaterThanOneRes.get(Constants.STATUS));

        //scheduler online
        schedules.clear();
        Schedule schedule = getSchedule();
        schedule.setReleaseState(ReleaseState.ONLINE);
        schedules.add(schedule);
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Mockito.when(scheduleMapper.queryByProcessDefinitionCode(46L)).thenReturn(schedules);
        Map<String, Object> schedulerOnlineRes = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, 46L);
        Assert.assertEquals(Status.SCHEDULE_CRON_STATE_ONLINE, schedulerOnlineRes.get(Constants.STATUS));

        //delete success
        schedules.clear();
        schedule.setReleaseState(ReleaseState.OFFLINE);
        schedules.add(schedule);
        Mockito.when(processDefineMapper.deleteById(46)).thenReturn(1);
        Mockito.when(processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode())).thenReturn(1);
        Mockito.when(scheduleMapper.queryByProcessDefinitionCode(46L)).thenReturn(schedules);
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> deleteSuccess = processDefinitionService.deleteProcessDefinitionByCode(loginUser, projectCode, 46L);
        Assert.assertEquals(Status.SUCCESS, deleteSuccess.get(Constants.STATUS));
    }

    @Test
    public void testReleaseProcessDefinition() {
        long projectCode = 1L;
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Project project = getProject(projectCode);

        User loginUser = new User();
        loginUser.setId(1);
        loginUser.setUserType(UserType.GENERAL_USER);

        //project check auth fail
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> map = processDefinitionService.releaseProcessDefinition(loginUser, projectCode, 6, ReleaseState.OFFLINE);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));

        // project check auth success, process definition online
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(processDefineMapper.queryByCode(46L)).thenReturn(getProcessDefinition());
        Map<String, Object> onlineRes = processDefinitionService.releaseProcessDefinition(
                loginUser, projectCode, 46, ReleaseState.ONLINE);
        Assert.assertEquals(Status.SUCCESS, onlineRes.get(Constants.STATUS));

        // project check auth success, process definition online (with resources)
        ProcessDefinition processDefinition1 = getProcessDefinition();
        processDefinition1.setResourceIds("1,2");
        Map<String, Object> onlineWithResourceRes = processDefinitionService.releaseProcessDefinition(
                loginUser, projectCode, 46, ReleaseState.ONLINE);
        Assert.assertEquals(Status.SUCCESS, onlineWithResourceRes.get(Constants.STATUS));

        // release error code
        Map<String, Object> failRes = processDefinitionService.releaseProcessDefinition(
                loginUser, projectCode, 46, ReleaseState.getEnum(2));
        Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR, failRes.get(Constants.STATUS));
    }

    @Test
    public void testVerifyProcessDefinitionName() {
        long projectCode = 1L;
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Project project = getProject(projectCode);

        User loginUser = new User();
        loginUser.setId(-1);
        loginUser.setUserType(UserType.GENERAL_USER);

        //project check auth fail
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> map = processDefinitionService.verifyProcessDefinitionName(loginUser, projectCode, "test_pdf");
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, map.get(Constants.STATUS));

        //project check auth success, process not exist
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(processDefineMapper.verifyByDefineName(project.getCode(), "test_pdf")).thenReturn(null);
        Map<String, Object> processNotExistRes = processDefinitionService.verifyProcessDefinitionName(loginUser, projectCode, "test_pdf");
        Assert.assertEquals(Status.SUCCESS, processNotExistRes.get(Constants.STATUS));

        //process exist
        Mockito.when(processDefineMapper.verifyByDefineName(project.getCode(), "test_pdf")).thenReturn(getProcessDefinition());
        Map<String, Object> processExistRes = processDefinitionService.verifyProcessDefinitionName(loginUser, projectCode, "test_pdf");
        Assert.assertEquals(Status.PROCESS_DEFINITION_NAME_EXIST, processExistRes.get(Constants.STATUS));
    }

    @Test
    public void testCheckProcessNodeList() {
        Map<String, Object> dataNotValidRes = processDefinitionService.checkProcessNodeList(null);
        Assert.assertEquals(Status.DATA_IS_NOT_VALID, dataNotValidRes.get(Constants.STATUS));

        Map<String, Object> taskEmptyRes = processDefinitionService.checkProcessNodeList(taskRelationJson);
        Assert.assertEquals(Status.PROCESS_DAG_IS_EMPTY, taskEmptyRes.get(Constants.STATUS));
    }

    @Test
    public void testGetTaskNodeListByDefinitionCode() {
        long projectCode = 1L;
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Project project = getProject(projectCode);

        User loginUser = new User();
        loginUser.setId(-1);
        loginUser.setUserType(UserType.GENERAL_USER);

        //project check auth success
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);

        //process definition not exist
        Mockito.when(processDefineMapper.queryByCode(46L)).thenReturn(null);
        Map<String, Object> processDefinitionNullRes = processDefinitionService.getTaskNodeListByDefinitionCode(loginUser, projectCode, 46L);
        Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionNullRes.get(Constants.STATUS));

        //success
        ProcessDefinition processDefinition = getProcessDefinition();
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(processService.genDagData(Mockito.any())).thenReturn(new DagData(processDefinition, null, null));
        Mockito.when(processDefineMapper.queryByCode(46L)).thenReturn(processDefinition);
        Map<String, Object> dataNotValidRes = processDefinitionService.getTaskNodeListByDefinitionCode(loginUser, projectCode, 46L);
        Assert.assertEquals(Status.SUCCESS, dataNotValidRes.get(Constants.STATUS));
    }

    @Test
    public void testGetTaskNodeListByDefinitionCodes() {
        long projectCode = 1L;
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Project project = getProject(projectCode);

        User loginUser = new User();
        loginUser.setId(-1);
        loginUser.setUserType(UserType.GENERAL_USER);

        //project check auth success
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);

        //process definition not exist
        String defineCodes = "46";
        Set<Long> defineCodeSet = Lists.newArrayList(defineCodes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet());
        Mockito.when(processDefineMapper.queryByCodes(defineCodeSet)).thenReturn(null);
        Map<String, Object> processNotExistRes = processDefinitionService.getNodeListMapByDefinitionCodes(loginUser, projectCode, defineCodes);
        Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, processNotExistRes.get(Constants.STATUS));

        putMsg(result, Status.SUCCESS, projectCode);
        ProcessDefinition processDefinition = getProcessDefinition();
        List<ProcessDefinition> processDefinitionList = new ArrayList<>();
        processDefinitionList.add(processDefinition);
        Mockito.when(processDefineMapper.queryByCodes(defineCodeSet)).thenReturn(processDefinitionList);
        Mockito.when(processService.genDagData(Mockito.any())).thenReturn(new DagData(processDefinition, null, null));
        Map<String, Object> successRes = processDefinitionService.getNodeListMapByDefinitionCodes(loginUser, projectCode, defineCodes);
        Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
    }

    @Test
    public void testQueryAllProcessDefinitionByProjectCode() {
        User loginUser = new User();
        loginUser.setId(1);
        loginUser.setUserType(UserType.GENERAL_USER);
        Map<String, Object> result = new HashMap<>();
        long projectCode = 2L;
        Project project = getProject(projectCode);
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        putMsg(result, Status.SUCCESS, projectCode);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        ProcessDefinition processDefinition = getProcessDefinition();
        List<ProcessDefinition> processDefinitionList = new ArrayList<>();
        processDefinitionList.add(processDefinition);
        Mockito.when(processDefineMapper.queryAllDefinitionList(projectCode)).thenReturn(processDefinitionList);
        Map<String, Object> successRes = processDefinitionService.queryAllProcessDefinitionByProjectCode(loginUser, projectCode);
        Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
    }

    @Test
    public void testViewTree() {
        //process definition not exist
        ProcessDefinition processDefinition = getProcessDefinition();
        Map<String, Object> processDefinitionNullRes = processDefinitionService.viewTree(46, 10);
        Assert.assertEquals(Status.PROCESS_DEFINE_NOT_EXIST, processDefinitionNullRes.get(Constants.STATUS));

        //task instance not exist
        Mockito.when(processDefineMapper.queryByCode(46L)).thenReturn(processDefinition);
        Mockito.when(processService.genDagGraph(processDefinition)).thenReturn(new DAG<>());
        Map<String, Object> taskNullRes = processDefinitionService.viewTree(46, 10);
        Assert.assertEquals(Status.SUCCESS, taskNullRes.get(Constants.STATUS));

        //task instance exist
        Map<String, Object> taskNotNullRes = processDefinitionService.viewTree(46, 10);
        Assert.assertEquals(Status.SUCCESS, taskNotNullRes.get(Constants.STATUS));
    }

    @Test
    public void testSubProcessViewTree() {
        ProcessDefinition processDefinition = getProcessDefinition();
        Mockito.when(processDefineMapper.queryByCode(46L)).thenReturn(processDefinition);
        Mockito.when(processService.genDagGraph(processDefinition)).thenReturn(new DAG<>());
        Map<String, Object> taskNotNullRes = processDefinitionService.viewTree(46, 10);
        Assert.assertEquals(Status.SUCCESS, taskNotNullRes.get(Constants.STATUS));
    }

    @Test
    public void testUpdateProcessDefinition() {
        User loginUser = new User();
        loginUser.setId(1);
        loginUser.setUserType(UserType.ADMIN_USER);

        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.SUCCESS);

        long projectCode = 1L;
        Project project = getProject(projectCode);
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);

        Map<String, Object> updateResult = processDefinitionService.updateProcessDefinition(loginUser, projectCode, "test", 1,
                "", "", "", 0, "root", null, null);
        Assert.assertEquals(Status.DATA_IS_NOT_VALID, updateResult.get(Constants.STATUS));
    }

    @Test
    public void testBatchExportProcessDefinitionByCodes() {
        processDefinitionService.batchExportProcessDefinitionByCodes(null, 1L, null, null);

        User loginUser = new User();
        loginUser.setId(1);
        loginUser.setUserType(UserType.ADMIN_USER);

        long projectCode = 1L;
        Project project = getProject(projectCode);
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT);
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(getProject(projectCode));
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        processDefinitionService.batchExportProcessDefinitionByCodes(
                loginUser, projectCode, "1", null);

        ProcessDefinition processDefinition = new ProcessDefinition();
        processDefinition.setId(1);
        Map<String, Object> checkResult = new HashMap<>();
        checkResult.put(Constants.STATUS, Status.SUCCESS);
        Mockito.when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        Mockito.when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(checkResult);
        HttpServletResponse response = mock(HttpServletResponse.class);

        DagData dagData = new DagData(getProcessDefinition(), null, null);
        Mockito.when(processService.genDagData(Mockito.any())).thenReturn(dagData);
        processDefinitionService.batchExportProcessDefinitionByCodes(loginUser, projectCode, "1", response);
        Assert.assertNotNull(processDefinitionService.exportProcessDagData(processDefinition));
    }

    /**
     * get mock processDefinition
     *
     * @return ProcessDefinition
     */
    private ProcessDefinition getProcessDefinition() {
        ProcessDefinition processDefinition = new ProcessDefinition();
        processDefinition.setId(46);
        processDefinition.setProjectCode(1L);
        processDefinition.setName("test_pdf");
        processDefinition.setTenantId(1);
        processDefinition.setDescription("");
        processDefinition.setCode(46L);
        return processDefinition;
    }

    /**
     * get mock Project
     *
     * @param projectCode projectCode
     * @return Project
     */
    private Project getProject(long projectCode) {
        Project project = new Project();
        project.setCode(projectCode);
        project.setId(1);
        project.setName("test");
        project.setUserId(1);
        return project;
    }

    private List<ProcessTaskRelation> getProcessTaskRelation(long projectCode) {
        List<ProcessTaskRelation> processTaskRelations = new ArrayList<>();
        ProcessTaskRelation processTaskRelation = new ProcessTaskRelation();
        processTaskRelation.setProjectCode(projectCode);
        processTaskRelation.setProcessDefinitionCode(46L);
        processTaskRelation.setProcessDefinitionVersion(1);
        processTaskRelations.add(processTaskRelation);
        return processTaskRelations;
    }

    /**
     * get mock schedule
     *
     * @return schedule
     */
    private Schedule getSchedule() {
        Date date = new Date();
        Schedule schedule = new Schedule();
        schedule.setId(46);
        schedule.setProcessDefinitionCode(1);
        schedule.setStartTime(date);
        schedule.setEndTime(date);
        schedule.setCrontab("0 0 5 * * ? *");
        schedule.setFailureStrategy(FailureStrategy.END);
        schedule.setUserId(1);
        schedule.setReleaseState(ReleaseState.OFFLINE);
        schedule.setProcessInstancePriority(Priority.MEDIUM);
        schedule.setWarningType(WarningType.NONE);
        schedule.setWarningGroupId(1);
        schedule.setWorkerGroup(Constants.DEFAULT_WORKER_GROUP);
        return schedule;
    }

    private void putMsg(Map<String, Object> result, Status status, Object... statusParams) {
        result.put(Constants.STATUS, status);
        if (statusParams != null && statusParams.length > 0) {
            result.put(Constants.MSG, MessageFormat.format(status.getMsg(), statusParams));
        } else {
            result.put(Constants.MSG, status.getMsg());
        }
    }
}
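One thing worth noting about the suite above: every mapper is a Mockito mock, so these tests exercise only the service logic and by construction cannot observe whether the `@Transactional` boundary actually works — which is exactly the regression this issue describes. Below is a hedged sketch of an integration-style check that could catch it; the class name, Spring Boot test wiring, and the failure-injection via `@MockBean` are assumptions, and the argument list simply mirrors the unit test above.

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.boot.test.mock.mockito.MockBean;
import org.springframework.test.context.junit4.SpringRunner;

@RunWith(SpringRunner.class)
@SpringBootTest
public class ProcessDefinitionTransactionIT {

    @Autowired
    private ProcessDefinitionService processDefinitionService;

    @Autowired
    private ProcessDefinitionMapper processDefinitionMapper;

    // Replace the collaborator behind the second table write with a mock that always fails.
    @MockBean
    private ProcessDefinitionLogMapper processDefinitionLogMapper;

    @Test
    public void firstWriteRollsBackWhenSecondWriteFails() {
        Mockito.when(processDefinitionLogMapper.insert(Mockito.any()))
                .thenThrow(new RuntimeException("simulated log-insert failure"));

        User loginUser = new User();
        loginUser.setId(1);
        loginUser.setUserType(UserType.ADMIN_USER);
        try {
            // illustrative call: any service method that writes the definition
            // table first and the log table second would do
            processDefinitionService.updateProcessDefinition(loginUser, 1L, "test", 1,
                    "", "", "", 0, "root", null, null);
        } catch (RuntimeException expected) {
            // ignored: only the resulting database state matters here
        }

        // If the transaction took effect, the definition table must be unchanged.
        Assert.assertNull(processDefinitionMapper.queryByCode(46L));
    }
}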
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6284
[Bug] [API] Api transaction does not take effect
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

API transactions do not take effect, e.g. in the processDefinition create/update APIs.

### What you expected to happen

Transactional operations should work.

### How to reproduce

Make the second table write fail: the first table write is not rolled back.

### Anything else

_No response_

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
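Since this record repeats the same issue for a second affected file, here is a sketch of the usual remedies rather than the failure mode again. This is general Spring practice, not necessarily the exact change made in PR #6288: keep both writes behind a single method that is invoked through the proxy, and widen `rollbackFor` so checked exceptions also roll back. The class name and the commented-out mapper calls are illustrative.

import org.springframework.aop.framework.AopContext;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

@Service
public class TransactionFixSketch {

    // rollbackFor = Exception.class also rolls back on checked exceptions;
    // the RuntimeException-only form used in the service code above misses them.
    @Transactional(rollbackFor = Exception.class)
    public void updateDefinitionAndLog() {
        // definitionMapper.updateById(...);  // first table write
        // logMapper.insert(...);             // second table write
    }

    public void entryPoint() {
        // When an internal call cannot be avoided, route it through the proxy
        // (requires @EnableAspectJAutoProxy(exposeProxy = true)) so the
        // @Transactional advice is actually applied:
        ((TransactionFixSketch) AopContext.currentProxy()).updateDefinitionAndLog();
    }
}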
https://github.com/apache/dolphinscheduler/issues/6284
https://github.com/apache/dolphinscheduler/pull/6288
7e8febed4de0c27f67bf67811d2314762dce2733
d7160874e4f086fafff8675123cb8de7f46cffc1
"2021-09-21T04:24:18Z"
java
"2021-09-22T12:38:22Z"
dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ProcessInstanceServiceTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service;

import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.when;

import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.impl.LoggerServiceImpl;
import org.apache.dolphinscheduler.api.service.impl.ProcessInstanceServiceImpl;
import org.apache.dolphinscheduler.api.service.impl.ProjectServiceImpl;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.enums.UserType;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.entity.WorkerGroup;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.service.process.ProcessService;

import java.io.IOException;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;

import com.baomidou.mybatisplus.extension.plugins.pagination.Page;

/**
 * process instance service test
 */
@RunWith(MockitoJUnitRunner.Silent.class)
public class ProcessInstanceServiceTest {

    @InjectMocks
    ProcessInstanceServiceImpl processInstanceService;

    @Mock
    ProjectMapper projectMapper;

    @Mock
    ProjectServiceImpl projectService;

    @Mock
    ProcessService processService;

    @Mock
    ProcessInstanceMapper processInstanceMapper;

    @Mock
    ProcessDefinitionLogMapper processDefinitionLogMapper;

    @Mock
    ProcessDefinitionMapper processDefineMapper;

    @Mock
    ProcessDefinitionService processDefinitionService;

    @Mock
    TaskInstanceMapper taskInstanceMapper;

    @Mock
    LoggerServiceImpl loggerService;

    @Mock
    UsersService usersService;

    @Mock
    TenantMapper tenantMapper;

    private String shellJson = "[{\"name\":\"\",\"preTaskCode\":0,\"preTaskVersion\":0,\"postTaskCode\":123456789,"
            + "\"postTaskVersion\":1,\"conditionType\":0,\"conditionParams\":\"{}\"},{\"name\":\"\",\"preTaskCode\":123456789,"
            + "\"preTaskVersion\":1,\"postTaskCode\":123451234,\"postTaskVersion\":1,\"conditionType\":0,\"conditionParams\":\"{}\"}]";

    private String taskJson = "[{\"name\":\"shell1\",\"description\":\"\",\"taskType\":\"SHELL\",\"taskParams\":{\"resourceList\":[],"
            + "\"localParams\":[],\"rawScript\":\"echo 1\",\"conditionResult\":{\"successNode\":[\"\"],\"failedNode\":[\"\"]},\"dependence\":{}},"
            + "\"flag\":\"NORMAL\",\"taskPriority\":\"MEDIUM\",\"workerGroup\":\"default\",\"failRetryTimes\":\"0\",\"failRetryInterval\":\"1\","
            + "\"timeoutFlag\":\"CLOSE\",\"timeoutNotifyStrategy\":\"\",\"timeout\":null,\"delayTime\":\"0\"},{\"name\":\"shell2\",\"description\":\"\","
            + "\"taskType\":\"SHELL\",\"taskParams\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"echo 2\",\"conditionResult\":{\"successNode\""
            + ":[\"\"],\"failedNode\":[\"\"]},\"dependence\":{}},\"flag\":\"NORMAL\",\"taskPriority\":\"MEDIUM\",\"workerGroup\":\"default\","
            + "\"failRetryTimes\":\"0\",\"failRetryInterval\":\"1\",\"timeoutFlag\":\"CLOSE\",\"timeoutNotifyStrategy\":\"\",\"timeout\":null,\"delayTime\":\"0\"}]";

    @Test
    public void testQueryProcessInstanceList() {
        long projectCode = 1L;
        User loginUser = getAdminUser();
        Project project = getProject(projectCode);
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);

        //project auth fail
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Result projectAuthFailRes = processInstanceService.queryProcessInstanceList(loginUser, projectCode, 46, "2020-01-01 00:00:00",
                "2020-01-02 00:00:00", "", "test_user", ExecutionStatus.SUBMITTED_SUCCESS,
                "192.168.xx.xx", 1, 10);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT.getCode(), (int) projectAuthFailRes.getCode());

        Date start = DateUtils.getScheduleDate("2020-01-01 00:00:00");
        Date end = DateUtils.getScheduleDate("2020-01-02 00:00:00");
        ProcessInstance processInstance = getProcessInstance();
        List<ProcessInstance> processInstanceList = new ArrayList<>();
        Page<ProcessInstance> pageReturn = new Page<>(1, 10);
        processInstanceList.add(processInstance);
        pageReturn.setRecords(processInstanceList);

        // data parameter check
        putMsg(result, Status.SUCCESS, projectCode);
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        when(processDefineMapper.selectById(Mockito.anyInt())).thenReturn(getProcessDefinition());
        when(processInstanceMapper.queryProcessInstanceListPaging(Mockito.any(Page.class),
                Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(),
                eq("192.168.xx.xx"), Mockito.any(), Mockito.any())).thenReturn(pageReturn);
        Result dataParameterRes = processInstanceService.queryProcessInstanceList(loginUser, projectCode, 1, "20200101 00:00:00",
                "20200102 00:00:00", "", loginUser.getUserName(), ExecutionStatus.SUBMITTED_SUCCESS,
                "192.168.xx.xx", 1, 10);
        Assert.assertEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR.getCode(), (int) dataParameterRes.getCode());

        //project auth success
        putMsg(result, Status.SUCCESS, projectCode);
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        when(usersService.queryUser(loginUser.getId())).thenReturn(loginUser);
        when(usersService.getUserIdByName(loginUser.getUserName())).thenReturn(loginUser.getId());
        when(processInstanceMapper.queryProcessInstanceListPaging(Mockito.any(Page.class), eq(project.getCode()), eq(1L), eq(""), eq(-1), Mockito.any(),
                eq("192.168.xx.xx"), eq(start), eq(end))).thenReturn(pageReturn);
        when(usersService.queryUser(processInstance.getExecutorId())).thenReturn(loginUser);
        Result successRes = processInstanceService.queryProcessInstanceList(loginUser, projectCode, 1, "2020-01-01 00:00:00",
                "2020-01-02 00:00:00", "", loginUser.getUserName(), ExecutionStatus.SUBMITTED_SUCCESS,
                "192.168.xx.xx", 1, 10);
        Assert.assertEquals(Status.SUCCESS.getCode(), (int) successRes.getCode());

        // data parameter empty
        when(processInstanceMapper.queryProcessInstanceListPaging(Mockito.any(Page.class), eq(project.getCode()), eq(1L), eq(""), eq(-1), Mockito.any(),
                eq("192.168.xx.xx"), eq(null), eq(null))).thenReturn(pageReturn);
        successRes = processInstanceService.queryProcessInstanceList(loginUser, projectCode, 1, "",
                "", "", loginUser.getUserName(), ExecutionStatus.SUBMITTED_SUCCESS,
                "192.168.xx.xx", 1, 10);
        Assert.assertEquals(Status.SUCCESS.getCode(), (int) successRes.getCode());

        //executor null
        when(usersService.queryUser(loginUser.getId())).thenReturn(null);
        when(usersService.getUserIdByName(loginUser.getUserName())).thenReturn(-1);
        Result executorExistRes = processInstanceService.queryProcessInstanceList(loginUser, projectCode, 1, "2020-01-01 00:00:00",
                "2020-01-02 00:00:00", "", "admin", ExecutionStatus.SUBMITTED_SUCCESS,
                "192.168.xx.xx", 1, 10);
        Assert.assertEquals(Status.SUCCESS.getCode(), (int) executorExistRes.getCode());

        //executor name empty
        when(processInstanceMapper.queryProcessInstanceListPaging(Mockito.any(Page.class), eq(project.getCode()), eq(1L), eq(""), eq(0), Mockito.any(),
                eq("192.168.xx.xx"), eq(start), eq(end))).thenReturn(pageReturn);
        Result executorEmptyRes = processInstanceService.queryProcessInstanceList(loginUser, projectCode, 1, "2020-01-01 00:00:00",
                "2020-01-02 00:00:00", "", "", ExecutionStatus.SUBMITTED_SUCCESS,
                "192.168.xx.xx", 1, 10);
        Assert.assertEquals(Status.SUCCESS.getCode(), (int) executorEmptyRes.getCode());
    }

    @Test
    public void testQueryTopNLongestRunningProcessInstance() {
        long projectCode = 1L;
        User loginUser = getAdminUser();
        Project project = getProject(projectCode);
        Map<String, Object> result = new HashMap<>(5);
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);
        int size = 10;
        String startTime = "2020-01-01 00:00:00";
        String endTime = "2020-08-02 00:00:00";
        Date start = DateUtils.getScheduleDate(startTime);
        Date end = DateUtils.getScheduleDate(endTime);

        //project auth fail
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> projectAuthFailRes = processInstanceService.queryTopNLongestRunningProcessInstance(loginUser, projectCode, size, startTime, endTime);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, projectAuthFailRes.get(Constants.STATUS));

        //project auth success
        putMsg(result, Status.SUCCESS, projectCode);
        ProcessInstance processInstance = getProcessInstance();
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        when(usersService.queryUser(loginUser.getId())).thenReturn(loginUser);
        when(usersService.getUserIdByName(loginUser.getUserName())).thenReturn(loginUser.getId());
        when(usersService.queryUser(processInstance.getExecutorId())).thenReturn(loginUser);
        Map<String, Object> successRes = processInstanceService.queryTopNLongestRunningProcessInstance(loginUser, projectCode, size, startTime, endTime);
        Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
    }

    @Test
    public void testQueryProcessInstanceById() {
        long projectCode = 1L;
        User loginUser = getAdminUser();
        Project project = getProject(projectCode);
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);

        //project auth fail
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> projectAuthFailRes = processInstanceService.queryProcessInstanceById(loginUser, projectCode, 1);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, projectAuthFailRes.get(Constants.STATUS));

        //project auth success
        ProcessInstance processInstance = getProcessInstance();
        putMsg(result, Status.SUCCESS, projectCode);
        ProcessDefinition processDefinition = getProcessDefinition();
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        when(processService.findProcessInstanceDetailById(processInstance.getId())).thenReturn(processInstance);
        when(processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
                processInstance.getProcessDefinitionVersion())).thenReturn(processDefinition);
        Map<String, Object> successRes = processInstanceService.queryProcessInstanceById(loginUser, projectCode, 1);
        Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));

        //worker group null
        Map<String, Object> workerNullRes = processInstanceService.queryProcessInstanceById(loginUser, projectCode, 1);
        Assert.assertEquals(Status.SUCCESS, workerNullRes.get(Constants.STATUS));

        //worker group exist
        WorkerGroup workerGroup = getWorkGroup();
        Map<String, Object> workerExistRes = processInstanceService.queryProcessInstanceById(loginUser, projectCode, 1);
        Assert.assertEquals(Status.SUCCESS, workerExistRes.get(Constants.STATUS));
    }

    @Test
    public void testQueryTaskListByProcessId() throws IOException {
        long projectCode = 1L;
        User loginUser = getAdminUser();
        Project project = getProject(projectCode);
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);

        //project auth fail
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> projectAuthFailRes = processInstanceService.queryTaskListByProcessId(loginUser, projectCode, 1);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, projectAuthFailRes.get(Constants.STATUS));

        //project auth success
        putMsg(result, Status.SUCCESS, projectCode);
        ProcessInstance processInstance = getProcessInstance();
        processInstance.setState(ExecutionStatus.SUCCESS);
        TaskInstance taskInstance = new TaskInstance();
        taskInstance.setTaskType(TaskType.SHELL.getDesc());
        List<TaskInstance> taskInstanceList = new ArrayList<>();
        taskInstanceList.add(taskInstance);
        Result res = new Result();
        res.setCode(Status.SUCCESS.ordinal());
        res.setData("xxx");
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        when(processService.findProcessInstanceDetailById(processInstance.getId())).thenReturn(processInstance);
        when(processService.findValidTaskListByProcessId(processInstance.getId())).thenReturn(taskInstanceList);
        when(loggerService.queryLog(taskInstance.getId(), 0, 4098)).thenReturn(res);
        Map<String, Object> successRes = processInstanceService.queryTaskListByProcessId(loginUser, projectCode, 1);
        Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS));
    }

    @Test
    public void testParseLogForDependentResult() throws IOException {
        String logString = "[INFO] 2019-03-19 17:11:08.475 org.apache.dolphinscheduler.server.worker.log.TaskLogger:[172]"
                + " - [taskAppId=TASK_223_10739_452334] dependent item complete :|| 223-ALL-day-last1Day,SUCCESS\n"
                + "[INFO] 2019-03-19 17:11:08.476 org.apache.dolphinscheduler.server.worker.runner.TaskScheduleThread:[172]"
                + " - task : 223_10739_452334 exit status code : 0\n"
                + "[root@node2 current]# ";
        Map<String, DependResult> resultMap = processInstanceService.parseLogForDependentResult(logString);
        Assert.assertEquals(1, resultMap.size());
    }

    @Test
    public void testQuerySubProcessInstanceByTaskId() {
        long projectCode = 1L;
        User loginUser = getAdminUser();
        Project project = getProject(projectCode);
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);

        //project auth fail
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> projectAuthFailRes = processInstanceService.querySubProcessInstanceByTaskId(loginUser, projectCode, 1);
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, projectAuthFailRes.get(Constants.STATUS));

        //task null
        putMsg(result, Status.SUCCESS, projectCode);
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        when(processService.findTaskInstanceById(1)).thenReturn(null);
        Map<String, Object> taskNullRes = processInstanceService.querySubProcessInstanceByTaskId(loginUser, projectCode, 1);
        Assert.assertEquals(Status.TASK_INSTANCE_NOT_EXISTS, taskNullRes.get(Constants.STATUS));

        //task not sub process
        TaskInstance taskInstance = getTaskInstance();
        taskInstance.setTaskType(TaskType.HTTP.getDesc());
        taskInstance.setProcessInstanceId(1);
        putMsg(result, Status.SUCCESS, projectCode);
        when(processService.findTaskInstanceById(1)).thenReturn(taskInstance);
        Map<String, Object> notSubprocessRes = processInstanceService.querySubProcessInstanceByTaskId(loginUser, projectCode, 1);
        Assert.assertEquals(Status.TASK_INSTANCE_NOT_SUB_WORKFLOW_INSTANCE, notSubprocessRes.get(Constants.STATUS));

        //sub process not exist
        TaskInstance subTask = getTaskInstance();
        subTask.setTaskType(TaskType.SUB_PROCESS.getDesc());
        subTask.setProcessInstanceId(1);
        putMsg(result, Status.SUCCESS, projectCode);
        when(processService.findTaskInstanceById(subTask.getId())).thenReturn(subTask);
        when(processService.findSubProcessInstance(subTask.getProcessInstanceId(), subTask.getId())).thenReturn(null);
        Map<String, Object> subprocessNotExistRes = processInstanceService.querySubProcessInstanceByTaskId(loginUser, projectCode, 1);
        Assert.assertEquals(Status.SUB_PROCESS_INSTANCE_NOT_EXIST, subprocessNotExistRes.get(Constants.STATUS));

        //sub process exist
        ProcessInstance processInstance = getProcessInstance();
        putMsg(result, Status.SUCCESS, projectCode);
        when(processService.findSubProcessInstance(taskInstance.getProcessInstanceId(), taskInstance.getId())).thenReturn(processInstance);
        Map<String, Object> subprocessExistRes = processInstanceService.querySubProcessInstanceByTaskId(loginUser, projectCode, 1);
        Assert.assertEquals(Status.SUCCESS, subprocessExistRes.get(Constants.STATUS));
    }

    @Test
    public void testUpdateProcessInstance() {
        long projectCode = 1L;
        User loginUser = getAdminUser();
        Project project = getProject(projectCode);
        Map<String, Object> result = new HashMap<>();
        putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode);

        //project auth fail
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        Map<String, Object> projectAuthFailRes = processInstanceService.updateProcessInstance(loginUser, projectCode, 1,
                shellJson, taskJson, "2020-02-21 00:00:00", true, "", "", 0, "");
        Assert.assertEquals(Status.PROJECT_NOT_FOUNT, projectAuthFailRes.get(Constants.STATUS));

        //process instance null
        putMsg(result, Status.SUCCESS, projectCode);
        ProcessInstance processInstance = getProcessInstance();
        when(projectMapper.queryByCode(projectCode)).thenReturn(project);
        when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result);
        when(processService.findProcessInstanceDetailById(1)).thenReturn(null);
        Map<String, Object> processInstanceNullRes = processInstanceService.updateProcessInstance(loginUser, projectCode, 1,
                shellJson, taskJson, "2020-02-21 00:00:00", true, "", "", 0, "");
        Assert.assertEquals(Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceNullRes.get(Constants.STATUS));

        //process instance not finish
        when(processService.findProcessInstanceDetailById(1)).thenReturn(processInstance);
        processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
        putMsg(result, Status.SUCCESS, projectCode);
        Map<String, Object> processInstanceNotFinishRes = processInstanceService.updateProcessInstance(loginUser, projectCode, 1,
                shellJson, taskJson, "2020-02-21 00:00:00", true, "", "", 0, "");
        Assert.assertEquals(Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR, processInstanceNotFinishRes.get(Constants.STATUS));

        //process instance finish
        processInstance.setState(ExecutionStatus.SUCCESS);
        processInstance.setTimeout(3000);
        processInstance.setCommandType(CommandType.STOP);
        processInstance.setProcessDefinitionCode(46L);
        processInstance.setProcessDefinitionVersion(1);
        ProcessDefinition processDefinition = getProcessDefinition();
        processDefinition.setId(1);
        processDefinition.setUserId(1);
        Tenant tenant = getTenant();
        when(processDefineMapper.queryByCode(46L)).thenReturn(processDefinition);
        when(tenantMapper.queryByTenantCode("root")).thenReturn(tenant);
        when(processService.getTenantForProcess(Mockito.anyInt(), Mockito.anyInt())).thenReturn(tenant);
        when(processService.updateProcessInstance(processInstance)).thenReturn(1);
        when(processDefinitionService.checkProcessNodeList(shellJson)).thenReturn(result);
        putMsg(result, Status.SUCCESS, projectCode);
        Map<String, Object> processInstanceFinishRes = processInstanceService.updateProcessInstance(loginUser, projectCode, 1,
                shellJson, taskJson, "2020-02-21 00:00:00", true, "", "",
0, "root"); Assert.assertEquals(Status.UPDATE_PROCESS_DEFINITION_ERROR, processInstanceFinishRes.get(Constants.STATUS)); //success when(processDefineMapper.queryByCode(46L)).thenReturn(processDefinition); putMsg(result, Status.SUCCESS, projectCode); Map<String, Object> successRes = processInstanceService.updateProcessInstance(loginUser, projectCode, 1, shellJson, taskJson,"2020-02-21 00:00:00", false, "", "", 0, "root"); Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS)); } @Test public void testQueryParentInstanceBySubId() { long projectCode = 1L; User loginUser = getAdminUser(); Project project = getProject(projectCode); Map<String, Object> result = new HashMap<>(); putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode); //project auth fail when(projectMapper.queryByCode(projectCode)).thenReturn(project); when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result); Map<String, Object> proejctAuthFailRes = processInstanceService.queryParentInstanceBySubId(loginUser, projectCode, 1); Assert.assertEquals(Status.PROJECT_NOT_FOUNT, proejctAuthFailRes.get(Constants.STATUS)); //process instance null putMsg(result, Status.SUCCESS, projectCode); when(projectMapper.queryByCode(projectCode)).thenReturn(project); when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result); when(processService.findProcessInstanceDetailById(1)).thenReturn(null); Map<String, Object> processInstanceNullRes = processInstanceService.queryParentInstanceBySubId(loginUser, projectCode, 1); Assert.assertEquals(Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceNullRes.get(Constants.STATUS)); //not sub process ProcessInstance processInstance = getProcessInstance(); processInstance.setIsSubProcess(Flag.NO); putMsg(result, Status.SUCCESS, projectCode); when(processService.findProcessInstanceDetailById(1)).thenReturn(processInstance); Map<String, Object> notSubProcessRes = processInstanceService.queryParentInstanceBySubId(loginUser, projectCode, 1); Assert.assertEquals(Status.PROCESS_INSTANCE_NOT_SUB_PROCESS_INSTANCE, notSubProcessRes.get(Constants.STATUS)); //sub process processInstance.setIsSubProcess(Flag.YES); putMsg(result, Status.SUCCESS, projectCode); when(processService.findParentProcessInstance(1)).thenReturn(null); Map<String, Object> subProcessNullRes = processInstanceService.queryParentInstanceBySubId(loginUser, projectCode, 1); Assert.assertEquals(Status.SUB_PROCESS_INSTANCE_NOT_EXIST, subProcessNullRes.get(Constants.STATUS)); //success putMsg(result, Status.SUCCESS, projectCode); when(processService.findParentProcessInstance(1)).thenReturn(processInstance); Map<String, Object> successRes = processInstanceService.queryParentInstanceBySubId(loginUser, projectCode, 1); Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS)); } @Test public void testDeleteProcessInstanceById() { long projectCode = 1L; User loginUser = getAdminUser(); Project project = getProject(projectCode); Map<String, Object> result = new HashMap<>(); putMsg(result, Status.PROJECT_NOT_FOUNT, projectCode); //process instance null putMsg(result, Status.SUCCESS, projectCode); when(projectMapper.queryByCode(projectCode)).thenReturn(project); when(projectService.checkProjectAndAuth(loginUser, project, projectCode)).thenReturn(result); when(processService.findProcessInstanceDetailById(1)).thenReturn(null); } @Test public void testViewVariables() { //process instance not null ProcessInstance processInstance = getProcessInstance(); 
processInstance.setCommandType(CommandType.SCHEDULER); processInstance.setScheduleTime(new Date()); processInstance.setGlobalParams(""); when(processInstanceMapper.queryDetailById(1)).thenReturn(processInstance); Map<String, Object> successRes = processInstanceService.viewVariables(1); Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS)); } @Test public void testViewGantt() throws Exception { ProcessInstance processInstance = getProcessInstance(); TaskInstance taskInstance = getTaskInstance(); taskInstance.setState(ExecutionStatus.RUNNING_EXECUTION); taskInstance.setStartTime(new Date()); when(processInstanceMapper.queryDetailById(1)).thenReturn(processInstance); when(processDefinitionLogMapper.queryByDefinitionCodeAndVersion( processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion() )).thenReturn(new ProcessDefinitionLog()); when(processInstanceMapper.queryDetailById(1)).thenReturn(processInstance); when(taskInstanceMapper.queryByInstanceIdAndName(Mockito.anyInt(), Mockito.any())).thenReturn(taskInstance); DAG<String, TaskNode, TaskNodeRelation> graph = new DAG<>(); for (int i = 1; i <= 7; ++i) { graph.addNode(i + "", new TaskNode()); } when(processService.genDagGraph(Mockito.any(ProcessDefinition.class))) .thenReturn(graph); Map<String, Object> successRes = processInstanceService.viewGantt(1); Assert.assertEquals(Status.SUCCESS, successRes.get(Constants.STATUS)); } /** * get Mock Admin User * * @return admin user */ private User getAdminUser() { User loginUser = new User(); loginUser.setId(-1); loginUser.setUserName("admin"); loginUser.setUserType(UserType.GENERAL_USER); return loginUser; } /** * get mock Project * * @param projectCode projectCode * @return Project */ private Project getProject(long projectCode) { Project project = new Project(); project.setCode(projectCode); project.setId(1); project.setName("project_test1"); project.setUserId(1); return project; } /** * get Mock process instance * * @return process instance */ private ProcessInstance getProcessInstance() { ProcessInstance processInstance = new ProcessInstance(); processInstance.setId(1); processInstance.setName("test_process_instance"); processInstance.setProcessDefinitionCode(46L); processInstance.setProcessDefinitionVersion(1); processInstance.setStartTime(new Date()); processInstance.setEndTime(new Date()); return processInstance; } /** * get mock processDefinition * * @return ProcessDefinition */ private ProcessDefinition getProcessDefinition() { ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setCode(46L); processDefinition.setVersion(1); processDefinition.setId(46); processDefinition.setName("test_pdf"); processDefinition.setProjectCode(2L); processDefinition.setTenantId(1); processDefinition.setDescription(""); return processDefinition; } private Tenant getTenant() { Tenant tenant = new Tenant(); tenant.setId(1); tenant.setTenantCode("root"); return tenant; } /** * get Mock worker group * * @return worker group */ private WorkerGroup getWorkGroup() { WorkerGroup workerGroup = new WorkerGroup(); workerGroup.setName("test_workergroup"); return workerGroup; } /** * get Mock task instance * * @return task instance */ private TaskInstance getTaskInstance() { TaskInstance taskInstance = new TaskInstance(); taskInstance.setId(1); taskInstance.setName("test_task_instance"); taskInstance.setStartTime(new Date()); taskInstance.setEndTime(new Date()); taskInstance.setExecutorId(-1); return taskInstance; } private void putMsg(Map<String, 
Object> result, Status status, Object... statusParams) { result.put(Constants.STATUS, status); if (statusParams != null && statusParams.length > 0) { result.put(Constants.MSG, MessageFormat.format(status.getMsg(), statusParams)); } else { result.put(Constants.MSG, status.getMsg()); } } }
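The test class above leans entirely on Mockito's `when(...).thenReturn(...)` stubbing plus `putMsg`-built result maps, so each mapper and service dependency is replaced by a canned answer. As a minimal, self-contained sketch of that stubbing pattern — note the `ProjectLookup` interface and its method are hypothetical stand-ins for illustration, not types from the repository:

```java
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.junit.Assert;
import org.junit.Test;

public class StubbingPatternSketchTest {

    /** Hypothetical collaborator, standing in for mappers such as projectMapper above. */
    interface ProjectLookup {
        String queryNameByCode(long code);
    }

    @Test
    public void stubReturnsCannedValueOnlyForMatchingArgument() {
        ProjectLookup lookup = mock(ProjectLookup.class);
        // Stub one concrete argument; unstubbed calls fall back to Mockito's default (null).
        when(lookup.queryNameByCode(1L)).thenReturn("project_test1");

        Assert.assertEquals("project_test1", lookup.queryNameByCode(1L));
        Assert.assertNull(lookup.queryNameByCode(2L));
    }
}
```

Stubbing every collaborator this way is what keeps the service tests above hermetic: no database or running master/worker is needed to exercise the status-handling branches.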
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6301
[Bug] [UI] Forcing success on a failed task instance reports an error
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened
![image](https://user-images.githubusercontent.com/42576980/134448048-bc16a578-e513-437d-b15e-756379dfcb04.png)

### What you expected to happen
Forcing success on the failed task should succeed.

### How to reproduce
Perform a force-success operation on a failed task instance (a direct REST sketch follows this record's file content).

### Anything else
_No response_

### Are you willing to submit PR?
- [ ] Yes I am willing to submit a PR!

### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6301
https://github.com/apache/dolphinscheduler/pull/6305
91150628385bbac709d7e0030ae042e5e0cc396b
b8ed7e6f2d38aefdf4902253416c91e2cc02f8eb
"2021-09-23T02:53:07Z"
java
"2021-09-23T06:26:15Z"
dolphinscheduler-ui/src/js/conf/home/store/dag/actions.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import _ from 'lodash' import io from '@/module/io' export default { /** * Task status acquisition */ getTaskState ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload}/tasks`, { processInstanceId: payload }, res => { state.taskInstances = res.data.taskList resolve(res) }).catch(e => { reject(e) }) }) }, /** * Update process definition status */ editProcessState ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/${payload.code}/release`, { name: payload.name, releaseState: payload.releaseState }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * get process definition versions pagination info */ getProcessDefinitionVersionsPage ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * switch process definition version */ switchProcessDefinitionVersion ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * delete process definition version */ deleteProcessDefinitionVersion ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-definition/${payload.code}/versions/${payload.version}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Update process instance status */ editExecutorsState ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/execute`, { processInstanceId: payload.processInstanceId, executeType: payload.executeType }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Verify that the DGA map name exists */ verifDAGName ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/verify-name`, { name: payload }, res => { state.name = payload resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get process definition DAG diagram details */ getProcessDetails ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload}`, { }, res => { // process definition code state.code = res.data.processDefinition.code // version state.version = res.data.processDefinition.version // name state.name = res.data.processDefinition.name // description state.description = res.data.processDefinition.description // taskRelationJson state.connects = 
res.data.processTaskRelationList // locations state.locations = JSON.parse(res.data.processDefinition.locations) // global params state.globalParams = res.data.processDefinition.globalParamList // timeout state.timeout = res.data.processDefinition.timeout // tenantCode state.tenantCode = res.data.processDefinition.tenantCode || 'default' // tasks info state.tasks = res.data.taskDefinitionList.map(task => _.pick(task, [ 'code', 'name', 'version', 'description', 'delayTime', 'taskType', 'taskParams', 'flag', 'taskPriority', 'workerGroup', 'failRetryTimes', 'failRetryInterval', 'timeoutFlag', 'timeoutNotifyStrategy', 'timeout', 'environmentCode' ])) resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get process definition DAG diagram details */ copyProcess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-copy`, { codes: payload.codes, targetProjectCode: payload.targetProjectCode }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get process definition DAG diagram details */ moveProcess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-move`, { codes: payload.codes, targetProjectCode: payload.targetProjectCode }, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get all the items created by the logged in user */ getAllItems ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/created-and-authed', {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get the process instance DAG diagram details */ getInstancedetail ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload}`, { }, res => { const { processDefinition, processTaskRelationList, taskDefinitionList } = res.data.dagData // code state.code = processDefinition.code // version state.version = processDefinition.version // name state.name = res.data.name // desc state.description = processDefinition.description // connects state.connects = processTaskRelationList // locations state.locations = JSON.parse(processDefinition.locations) // global params state.globalParams = processDefinition.globalParamList // timeout state.timeout = processDefinition.timeout // tenantCode state.tenantCode = res.data.tenantCode || 'default' // tasks info state.tasks = taskDefinitionList.map(task => _.pick(task, [ 'code', 'name', 'version', 'description', 'delayTime', 'taskType', 'taskParams', 'flag', 'taskPriority', 'workerGroup', 'failRetryTimes', 'failRetryInterval', 'timeoutFlag', 'timeoutNotifyStrategy', 'timeout', 'environmentCode' ])) // startup parameters state.startup = _.assign(state.startup, _.pick(res.data, ['commandType', 'failureStrategy', 'processInstancePriority', 'workerGroup', 'warningType', 'warningGroupId', 'receivers', 'receiversCc'])) state.startup.commandParam = JSON.parse(res.data.commandParam) resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Create process definition */ saveDAGchart ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition`, { locations: JSON.stringify(state.locations), name: _.trim(state.name), taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, description: _.trim(state.description), globalParams: JSON.stringify(state.globalParams), timeout: state.timeout }, 
res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Process definition update */ updateDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/process-definition/${payload}`, { locations: JSON.stringify(state.locations), name: _.trim(state.name), taskDefinitionJson: JSON.stringify(state.tasks), taskRelationJson: JSON.stringify(state.connects), tenantCode: state.tenantCode, description: _.trim(state.description), globalParams: JSON.stringify(state.globalParams), timeout: state.timeout, releaseState: state.releaseState }, res => { resolve(res) state.isEditDag = false }).catch(e => { reject(e) }) }) }, /** * Process instance update */ updateInstance ({ state }, payload) { return new Promise((resolve, reject) => { const data = { globalParams: state.globalParams, tasks: state.tasks, tenantId: state.tenantId, timeout: state.timeout } io.put(`projects/${state.projectCode}/process-instances/${payload}`, { processInstanceJson: JSON.stringify(data), locations: JSON.stringify(state.locations), connects: JSON.stringify(state.connects), syncDefine: state.syncDefine }, res => { resolve(res) state.isEditDag = false }).catch(e => { reject(e) }) }) }, /** * Get a list of process definitions (sub-workflow usage is not paged) */ getProcessList ({ state }, payload) { return new Promise((resolve, reject) => { if (state.processListS.length) { resolve() return } io.get(`projects/${state.projectCode}/process-definition/list`, payload, res => { state.processListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of process definitions (list page usage with pagination) */ getProcessListP ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition`, payload, res => { resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of project */ getProjectList ({ state }, payload) { return new Promise((resolve, reject) => { if (state.projectListS.length) { resolve() return } io.get('projects/created-and-authed', payload, res => { state.projectListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get a list of process definitions by project code */ getProcessByProjectCode ({ state }, code) { return new Promise((resolve, reject) => { io.get(`projects/${code}/process-definition/all`, res => { resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * get datasource */ getDatasourceList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('datasources/list', { type: payload }, res => { resolve(res) }).catch(res => { reject(res) }) }) }, /** * get resources */ getResourcesList ({ state }) { return new Promise((resolve, reject) => { if (state.resourcesListS.length) { resolve() return } io.get('resources/list', { type: 'FILE' }, res => { state.resourcesListS = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * get jar */ getResourcesListJar ({ state }) { return new Promise((resolve, reject) => { if (state.resourcesListJar.length) { resolve() return } io.get('resources/query-by-type', { type: 'FILE' }, res => { state.resourcesListJar = res.data resolve(res.data) }).catch(res => { reject(res) }) }) }, /** * Get process instance */ getProcessInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances`, payload, res => { state.instanceListS = res.data.totalList resolve(res.data) }).catch(res => { reject(res) }) }) }, 
/** * Get alarm list */ getNotifyGroupList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('alert-groups/list', res => { state.notifyGroupListS = _.map(res.data, v => { return { id: v.id, code: v.groupName, disabled: false } }) resolve(_.cloneDeep(state.notifyGroupListS)) }).catch(res => { reject(res) }) }) }, /** * Process definition startup interface */ processStart ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/start-process-instance`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * View log */ getLog ({ state }, payload) { return new Promise((resolve, reject) => { io.get('log/detail', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get the process instance id according to the process definition id * @param taskId */ getSubProcessId ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/query-sub-by-parent`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Called before the process definition starts */ getStartCheck ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/executors/start-check`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Create timing */ createSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Preview timing */ previewSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/preview`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Timing list paging */ getScheduleList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/schedules`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Timing offline */ scheduleOffline ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/${payload.id}/offline`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Timing online */ scheduleOnline ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/schedules/${payload.id}/online`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Edit timing */ updateSchedule ({ state }, payload) { return new Promise((resolve, reject) => { io.put(`projects/${state.projectCode}/schedules/${payload.id}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Delete process instance */ deleteInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Batch delete process instance */ batchDeleteInstance ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-instances/batch-delete`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Delete definition */ deleteDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/process-definition/${payload.code}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Batch
delete definition */ batchDeleteDefinition ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/process-definition/batch-delete`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * export definition */ exportDefinition ({ state }, payload) { const downloadBlob = (data, fileNameS = 'json') => { if (!data) { return } const blob = new Blob([data]) const fileName = `${fileNameS}.json` if ('download' in document.createElement('a')) { // not IE const url = window.URL.createObjectURL(blob) const link = document.createElement('a') link.style.display = 'none' link.href = url link.setAttribute('download', fileName) document.body.appendChild(link) link.click() document.body.removeChild(link) // remove the element once the download finishes window.URL.revokeObjectURL(url) // release the blob object } else { // IE 10+ window.navigator.msSaveBlob(blob, fileName) } } io.post(`projects/${state.projectCode}/process-definition/batch-export`, { codes: payload.codes }, res => { downloadBlob(res, payload.fileName) }, e => { }, { responseType: 'blob' }) }, /** * Process instance get variable */ getViewvariables ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload.code}/view-variables`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Get UDF functions based on the data source */ getUdfList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('resources/udf-func/list', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query task instance list */ getTaskInstanceList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-instances`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Force fail/kill/need_fault_tolerance task success */ forceTaskSuccess ({ state }, payload) { return new Promise((resolve, reject) => { io.post(`projects/${state.projectCode}/task-instances/${payload.code}/force-success`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Query task record list */ getTaskRecordList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/task-record/list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query history task record list */ getHistoryTaskRecordList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/task-record/history-list-paging', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * tree chart */ getViewTree ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/view-tree`, { limit: payload.limit }, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * gantt chart */ getViewGantt ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-instances/${payload.processInstanceId}/view-gantt`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Query task node list */ getProcessTasksList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/process-definition/${payload.code}/tasks`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, getTaskListDefIdAll ({ state }, payload) { return new Promise((resolve, reject) => {
io.get(`projects/${state.projectCode}/process-definition/batch-query-tasks`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * remove timing */ deleteTiming ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${state.projectCode}/schedules/${payload.scheduleId}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, getResourceId ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`resources/${payload.id}`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, genTaskCodeList ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-definition/gen-task-codes`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, getTaskDefinitions ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${state.projectCode}/task-definition`, payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) } }
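For reference, the `forceTaskSuccess` action in the file above ultimately issues a POST to `projects/{projectCode}/task-instances/{id}/force-success`. A hedged sketch of calling that endpoint directly with `java.net.http` — the host, codes, and the `token` header value below are placeholders, and the exact auth header may differ by deployment; only the endpoint path is taken from the action above:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ForceSuccessSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder values; substitute a real host, project code, and task instance id.
        String base = "http://localhost:12345/dolphinscheduler";
        long projectCode = 1L;
        int taskInstanceId = 1;

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(base + "/projects/" + projectCode
                        + "/task-instances/" + taskInstanceId + "/force-success"))
                .header("token", "<session-token>") // placeholder auth header
                .POST(HttpRequest.BodyPublishers.noBody())
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```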
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6302
[Bug] [API] Adding a new task to an existing workflow loses the task
### Search before asking
- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened
A task newly added to an existing workflow is lost after saving.

### What you expected to happen
The task is added normally.

### How to reproduce
Update the process definition, then add a task (a REST sketch follows this report).

### Anything else
_No response_

### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!

### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
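To reproduce over the REST API rather than the UI, the update goes through the same PUT endpoint the UI's `updateDefinition` action uses (`projects/{projectCode}/process-definition/{code}`, shown in the previous record's actions.js). A rough sketch, assuming the endpoint accepts form-encoded parameters as the UI's `io.put` suggests; the host, codes, token, and abbreviated JSON payload strings are all placeholders:

```java
import java.net.URI;
import java.net.URLEncoder;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

public class UpdateDefinitionSketch {
    public static void main(String[] args) throws Exception {
        String base = "http://localhost:12345/dolphinscheduler"; // placeholder host
        long projectCode = 1L;                                   // placeholder codes
        long processCode = 2L;

        // Field names mirror the UI's updateDefinition action; the JSON strings
        // are abbreviated stand-ins. The reported bug is that a task added to
        // taskDefinitionJson/taskRelationJson here was dropped server-side.
        Map<String, String> form = new LinkedHashMap<>();
        form.put("name", "demo_workflow");
        form.put("taskDefinitionJson", "[/* existing tasks plus the newly added task */]");
        form.put("taskRelationJson", "[/* relations, including one for the new task */]");

        String body = form.entrySet().stream()
                .map(e -> URLEncoder.encode(e.getKey(), StandardCharsets.UTF_8) + "="
                        + URLEncoder.encode(e.getValue(), StandardCharsets.UTF_8))
                .collect(Collectors.joining("&"));

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(base + "/projects/" + projectCode
                        + "/process-definition/" + processCode))
                .header("Content-Type", "application/x-www-form-urlencoded")
                .header("token", "<session-token>") // placeholder credential
                .PUT(HttpRequest.BodyPublishers.ofString(body))
                .build();

        HttpResponse<String> resp = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(resp.statusCode());
    }
}
```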
https://github.com/apache/dolphinscheduler/issues/6302
https://github.com/apache/dolphinscheduler/pull/6307
b8ed7e6f2d38aefdf4902253416c91e2cc02f8eb
0b53adeb07df72a9b888566092642a77dfccf65d
"2021-09-23T02:56:34Z"
java
"2021-09-23T07:06:49Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID; import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS; import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS; import static java.util.stream.Collectors.toSet; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.DateInterval; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.process.ResourceInfo; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import 
org.apache.dolphinscheduler.common.utils.TaskParametersUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ErrorCommand; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper; import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.service.log.LogClientService; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.facebook.presto.jdbc.internal.guava.collect.Lists; import 
com.fasterxml.jackson.databind.node.ObjectNode; /** * process relative dao that some mappers in this. */ @Component public class ProcessService { private final Logger logger = LoggerFactory.getLogger(getClass()); private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal()}; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionMapper processDefineMapper; @Autowired private ProcessDefinitionLogMapper processDefineLogMapper; @Autowired private ProcessInstanceMapper processInstanceMapper; @Autowired private DataSourceMapper dataSourceMapper; @Autowired private ProcessInstanceMapMapper processInstanceMapMapper; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private CommandMapper commandMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private UdfFuncMapper udfFuncMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ErrorCommandMapper errorCommandMapper; @Autowired private TenantMapper tenantMapper; @Autowired private ProjectMapper projectMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired private EnvironmentMapper environmentMapper; /** * handle Command (construct ProcessInstance from Command) , wrapped in transaction * * @param logger logger * @param host host * @param validThreadNum validThreadNum * @param command found command * @return process instance */ @Transactional(rollbackFor = Exception.class) public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) { ProcessInstance processInstance = constructProcessInstance(command, host); // cannot construct process instance, return null if (processInstance == null) { logger.error("scan command, command parameter is error: {}", command); moveToErrorCommand(command, "process instance is null"); return null; } if (!checkThreadNum(command, validThreadNum)) { logger.info("there is not enough thread for this command: {}", command); return setWaitingThreadProcess(command, processInstance); } processInstance.setCommandType(command.getCommandType()); processInstance.addHistoryCmd(command.getCommandType()); saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); this.commandMapper.deleteById(command.getId()); return processInstance; } /** * save error command, and delete original command * * @param command command * @param message message */ @Transactional(rollbackFor = Exception.class) public void moveToErrorCommand(Command command, String message) { ErrorCommand errorCommand = new ErrorCommand(command, message); this.errorCommandMapper.insert(errorCommand); this.commandMapper.deleteById(command.getId()); } /** * set process waiting thread * * @param command command * @param processInstance processInstance * @return process instance */ private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) { processInstance.setState(ExecutionStatus.WAITING_THREAD); if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) { 
processInstance.addHistoryCmd(command.getCommandType()); } saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); createRecoveryWaitingThreadCommand(command, processInstance); return null; } /** * check thread num * * @param command command * @param validThreadNum validThreadNum * @return if thread is enough */ private boolean checkThreadNum(Command command, int validThreadNum) { int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode()); return validThreadNum >= commandThreadCount; } /** * insert one command * * @param command command * @return create result */ public int createCommand(Command command) { int result = 0; if (command != null) { result = commandMapper.insert(command); } return result; } /** * find one command from queue list * * @return command */ public Command findOneCommand() { return commandMapper.getOneToRun(); } /** * get command page * * @param pageSize * @param pageNumber * @return */ public List<Command> findCommandPage(int pageSize, int pageNumber) { Page<Command> commandPage = new Page<>(pageNumber, pageSize); return commandMapper.queryCommandPage(commandPage).getRecords(); } /** * check the input command exists in queue list * * @param command command * @return create command result */ public boolean verifyIsNeedCreateCommand(Command command) { boolean isNeedCreate = true; EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class); cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1); cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1); cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1); CommandType commandType = command.getCommandType(); if (cmdTypeMap.containsKey(commandType)) { ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam()); int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt(); List<Command> commands = commandMapper.selectList(null); // for all commands for (Command tmpCommand : commands) { if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) { ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam()); if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) { isNeedCreate = false; break; } } } } return isNeedCreate; } /** * find process instance detail by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceDetailById(int processId) { return processInstanceMapper.queryDetailById(processId); } /** * get task node list by definitionId */ public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) { ProcessDefinition processDefinition = processDefineMapper.selectById(defineId); if (processDefinition == null) { logger.error("process define not exists"); return new ArrayList<>(); } List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) { if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); return new ArrayList<>(taskDefinitionLogs); } /** * find process instance by id * * @param processId processId * @return process 
instance */ public ProcessInstance findProcessInstanceById(int processId) { return processInstanceMapper.selectById(processId); } /** * find process define by id. * * @param processDefinitionId processDefinitionId * @return process definition */ public ProcessDefinition findProcessDefineById(int processDefinitionId) { return processDefineMapper.selectById(processDefinitionId); } /** * find process define by code and version. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); if (processDefinition == null || processDefinition.getVersion() != version) { processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version); if (processDefinition != null) { processDefinition.setId(0); } } return processDefinition; } /** * find process define by code. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) { return processDefineMapper.queryByCode(processDefinitionCode); } /** * delete work process instance by id * * @param processInstanceId processInstanceId * @return delete process instance result */ public int deleteWorkProcessInstanceById(int processInstanceId) { return processInstanceMapper.deleteById(processInstanceId); } /** * delete all sub process by parent instance id * * @param processInstanceId processInstanceId * @return delete all sub process instance result */ public int deleteAllSubWorkProcessByParentId(int processInstanceId) { List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId); for (Integer subId : subProcessIdList) { deleteAllSubWorkProcessByParentId(subId); deleteWorkProcessMapByParentId(subId); removeTaskLogFile(subId); deleteWorkProcessInstanceById(subId); } return 1; } /** * remove task log file * * @param processInstanceId processInstanceId */ public void removeTaskLogFile(Integer processInstanceId) { List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId); if (CollectionUtils.isEmpty(taskInstanceList)) { return; } try (LogClientService logClient = new LogClientService()) { for (TaskInstance taskInstance : taskInstanceList) { String taskLogPath = taskInstance.getLogPath(); if (StringUtils.isEmpty(taskInstance.getHost())) { continue; } int port = Constants.RPC_PORT; String ip = ""; try { ip = Host.of(taskInstance.getHost()).getIp(); } catch (Exception e) { // compatible old version ip = taskInstance.getHost(); } // remove task log from loggerserver logClient.removeTaskLog(ip, port, taskLogPath); } } } /** * calculate sub process number in the process define. * * @param processDefinitionCode processDefinitionCode * @return process thread num count */ private Integer workProcessThreadNumCount(long processDefinitionCode) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); List<Integer> ids = new ArrayList<>(); recurseFindSubProcessId(processDefinition.getId(), ids); return ids.size() + 1; } /** * recursive query sub process definition id by parent id. 
* * @param parentId parentId * @param ids ids */ public void recurseFindSubProcessId(int parentId, List<Integer> ids) { List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId); if (taskNodeList != null && !taskNodeList.isEmpty()) { for (TaskDefinition taskNode : taskNodeList) { String parameter = taskNode.getTaskParams(); ObjectNode parameterJson = JSONUtils.parseObject(parameter); if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) { SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class); ids.add(subProcessParam.getProcessDefinitionId()); recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids); } } } } /** * create recovery waiting thread command when thread pool is not enough for the process instance. * sub work process instance need not to create recovery command. * create recovery waiting thread command and delete origin command at the same time. * if the recovery command is exists, only update the field update_time * * @param originCommand originCommand * @param processInstance processInstance */ public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) { // sub process doesnot need to create wait command if (processInstance.getIsSubProcess() == Flag.YES) { if (originCommand != null) { commandMapper.deleteById(originCommand.getId()); } return; } Map<String, String> cmdParam = new HashMap<>(); cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId())); // process instance quit by "waiting thread" state if (originCommand == null) { Command command = new Command( CommandType.RECOVER_WAITING_THREAD, processInstance.getTaskDependType(), processInstance.getFailureStrategy(), processInstance.getExecutorId(), processInstance.getProcessDefinition().getCode(), JSONUtils.toJsonString(cmdParam), processInstance.getWarningType(), processInstance.getWarningGroupId(), processInstance.getScheduleTime(), processInstance.getWorkerGroup(), processInstance.getEnvironmentCode(), processInstance.getProcessInstancePriority() ); saveCommand(command); return; } // update the command time if current command if recover from waiting if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) { originCommand.setUpdateTime(new Date()); saveCommand(originCommand); } else { // delete old command and create new waiting thread command commandMapper.deleteById(originCommand.getId()); originCommand.setId(0); originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD); originCommand.setUpdateTime(new Date()); originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam)); originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority()); saveCommand(originCommand); } } /** * get schedule time from command * * @param command command * @param cmdParam cmdParam map * @return date */ private Date getScheduleTime(Command command, Map<String, String> cmdParam) { Date scheduleTime = command.getScheduleTime(); if (scheduleTime == null && cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) { scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); } return scheduleTime; } /** * generate a new work process instance from command. 
* * @param processDefinition processDefinition * @param command command * @param cmdParam cmdParam map * @return process instance */ private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition, Command command, Map<String, String> cmdParam) { ProcessInstance processInstance = new ProcessInstance(processDefinition); processInstance.setProcessDefinitionCode(processDefinition.getCode()); processInstance.setProcessDefinitionVersion(processDefinition.getVersion()); processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setRecovery(Flag.NO); processInstance.setStartTime(new Date()); processInstance.setRunTimes(1); processInstance.setMaxTryTimes(0); //processInstance.setProcessDefinitionId(command.getProcessDefinitionId()); processInstance.setCommandParam(command.getCommandParam()); processInstance.setCommandType(command.getCommandType()); processInstance.setIsSubProcess(Flag.NO); processInstance.setTaskDependType(command.getTaskDependType()); processInstance.setFailureStrategy(command.getFailureStrategy()); processInstance.setExecutorId(command.getExecutorId()); WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType(); processInstance.setWarningType(warningType); Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId(); processInstance.setWarningGroupId(warningGroupId); // schedule time Date scheduleTime = getScheduleTime(command, cmdParam); if (scheduleTime != null) { processInstance.setScheduleTime(scheduleTime); } processInstance.setCommandStartTime(command.getStartTime()); processInstance.setLocations(processDefinition.getLocations()); // reset global params while there are start parameters setGlobalParamIfCommanded(processDefinition, cmdParam); // curing global params processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), getCommandTypeIfComplement(processInstance, command), processInstance.getScheduleTime())); // set process instance priority processInstance.setProcessInstancePriority(command.getProcessInstancePriority()); String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup(); processInstance.setWorkerGroup(workerGroup); processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? 
-1 : command.getEnvironmentCode()); processInstance.setTimeout(processDefinition.getTimeout()); processInstance.setTenantId(processDefinition.getTenantId()); return processInstance; } private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) { // get start params from command param Map<String, String> startParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) { String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS); startParamMap = JSONUtils.toMap(startParamJson); } Map<String, String> fatherParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) { String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS); fatherParamMap = JSONUtils.toMap(fatherParamJson); } startParamMap.putAll(fatherParamMap); // set start param into global params if (startParamMap.size() > 0 && processDefinition.getGlobalParamMap() != null) { for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) { String val = startParamMap.get(param.getKey()); if (val != null) { param.setValue(val); } } } } /** * get process tenant * there is tenant id in definition, use the tenant of the definition. * if there is not tenant id in the definiton or the tenant not exist * use definition creator's tenant. * * @param tenantId tenantId * @param userId userId * @return tenant */ public Tenant getTenantForProcess(int tenantId, int userId) { Tenant tenant = null; if (tenantId >= 0) { tenant = tenantMapper.queryById(tenantId); } if (userId == 0) { return null; } if (tenant == null) { User user = userMapper.selectById(userId); tenant = tenantMapper.queryById(user.getTenantId()); } return tenant; } /** * get an environment * use the code of the environment to find a environment. * * @param environmentCode environmentCode * @return Environment */ public Environment findEnvironmentByCode(Long environmentCode) { Environment environment = null; if (environmentCode >= 0) { environment = environmentMapper.queryByEnvironmentCode(environmentCode); } return environment; } /** * check command parameters is valid * * @param command command * @param cmdParam cmdParam map * @return whether command param is valid */ private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) { if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) { if (cmdParam == null || !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES) || cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) { logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType()); return false; } } return true; } /** * construct process instance according to one command. * * @param command command * @param host host * @return process instance */ private ProcessInstance constructProcessInstance(Command command, String host) { ProcessInstance processInstance; CommandType commandType = command.getCommandType(); Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam()); ProcessDefinition processDefinition = getProcessDefinitionByCommand(command.getProcessDefinitionCode(), cmdParam); if (processDefinition == null) { logger.error("cannot find the work process define! 
define code : {}", command.getProcessDefinitionCode()); return null; } if (cmdParam != null) { int processInstanceId = 0; // recover from failure or pause tasks if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING); processInstanceId = Integer.parseInt(processId); if (processInstanceId == 0) { logger.error("command parameter is error, [ ProcessInstanceId ] is 0"); return null; } } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { // sub process map String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS); processInstanceId = Integer.parseInt(pId); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { // waiting thread command String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD); processInstanceId = Integer.parseInt(pId); } if (processInstanceId == 0) { processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } else { processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return processInstance; } CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command); // reset global params while repeat running is needed by cmdParam if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) { setGlobalParamIfCommanded(processDefinition, cmdParam); } // Recalculate global parameters after rerun. processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), commandTypeIfComplement, processInstance.getScheduleTime())); processInstance.setProcessDefinition(processDefinition); } //reset command parameter if (processInstance.getCommandParam() != null) { Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam()); for (Map.Entry<String, String> entry : processCmdParam.entrySet()) { if (!cmdParam.containsKey(entry.getKey())) { cmdParam.put(entry.getKey(), entry.getValue()); } } } // reset command parameter if sub process if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstance.setCommandParam(command.getCommandParam()); } } else { // generate one new process instance processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) { logger.error("command parameter check failed!"); return null; } if (command.getScheduleTime() != null) { processInstance.setScheduleTime(command.getScheduleTime()); } processInstance.setHost(host); ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION; int runTime = processInstance.getRunTimes(); switch (commandType) { case START_PROCESS: break; case START_FAILURE_TASK_PROCESS: // find failed tasks and init these tasks List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE); List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE); List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); failedList.addAll(killedList); failedList.addAll(toleranceList); for (Integer taskId : failedList) { initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(Constants.COMMA, convertIntListToString(failedList))); 
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case START_CURRENT_TASK_PROCESS: break; case RECOVER_WAITING_THREAD: break; case RECOVER_SUSPENDED_PROCESS: // find pause tasks and init task's state cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE); List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); suspendedNodeList.addAll(stopNodeList); for (Integer taskId : suspendedNodeList) { // initialize the pause state initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList))); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case RECOVER_TOLERANCE_FAULT_PROCESS: // recover tolerance fault process processInstance.setRecovery(Flag.YES); runStatus = processInstance.getState(); break; case COMPLEMENT_DATA: // delete all the valid tasks when complement data List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { taskInstance.setFlag(Flag.NO); this.updateTaskInstance(taskInstance); } initComplementDataParam(processDefinition, processInstance, cmdParam); break; case REPEAT_RUNNING: // delete the recover task names from command parameter if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } // delete all the valid tasks when repeat running List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : validTaskList) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); } processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processInstance.setRunTimes(runTime + 1); initComplementDataParam(processDefinition, processInstance, cmdParam); break; case SCHEDULER: break; default: break; } processInstance.setState(runStatus); return processInstance; } /** * get process definition by command * If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance * Otherwise, get the latest version of ProcessDefinition * * @return ProcessDefinition */ private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) { if (cmdParam != null) { int processInstanceId = 0; if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)); } if (processInstanceId != 0) { ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return null; } return processDefineLogMapper.queryByDefinitionCodeAndVersion( processInstance.getProcessDefinitionCode(), 
processInstance.getProcessDefinitionVersion()); } } return processDefineMapper.queryByCode(processDefinitionCode); } /** * return complement data if the process start with complement data * * @param processInstance processInstance * @param command command * @return command type */ private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) { if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) { return CommandType.COMPLEMENT_DATA; } else { return command.getCommandType(); } } /** * initialize complement data parameters * * @param processDefinition processDefinition * @param processInstance processInstance * @param cmdParam cmdParam */ private void initComplementDataParam(ProcessDefinition processDefinition, ProcessInstance processInstance, Map<String, String> cmdParam) { if (!processInstance.isComplementData()) { return; } Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE), YYYY_MM_DD_HH_MM_SS); if (Flag.NO == processInstance.getIsSubProcess()) { processInstance.setScheduleTime(startComplementTime); } processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); } /** * set sub work process parameters. * handle sub work process instance, update relation table and command parameters * set sub work process flag, extends parent work process command parameters * * @param subProcessInstance subProcessInstance */ public void setSubProcessParam(ProcessInstance subProcessInstance) { String cmdParam = subProcessInstance.getCommandParam(); if (StringUtils.isEmpty(cmdParam)) { return; } Map<String, String> paramMap = JSONUtils.toMap(cmdParam); // write sub process id into cmd param. if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS) && CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) { paramMap.remove(CMD_PARAM_SUB_PROCESS); paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId())); subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap)); subProcessInstance.setIsSubProcess(Flag.YES); this.saveProcessInstance(subProcessInstance); } // copy parent instance user def params to sub process.. String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID); if (StringUtils.isNotEmpty(parentInstanceId)) { ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId)); if (parentInstance != null) { subProcessInstance.setGlobalParams( joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams())); this.saveProcessInstance(subProcessInstance); } else { logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam); } } ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class); if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) { return; } // update sub process id to process map table processInstanceMap.setProcessInstanceId(subProcessInstance.getId()); this.updateWorkProcessInstanceMap(processInstanceMap); } /** * join parent global params into sub process. * only the keys doesn't in sub process global would be joined. 
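     * for example, parent params [a=1, b=2] joined with sub params [b=3] give [b=3, a=1]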
     *
     * @param parentGlobalParams parentGlobalParams
     * @param subGlobalParams subGlobalParams
     * @return global params join
     */
    private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
        List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
        List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
        Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
        for (Property parent : parentPropertyList) {
            if (!subMap.containsKey(parent.getProp())) {
                subPropertyList.add(parent);
            }
        }
        return JSONUtils.toJsonString(subPropertyList);
    }

    /**
     * initialize task instance
     *
     * @param taskInstance taskInstance
     */
    private void initTaskInstance(TaskInstance taskInstance) {
        if (!taskInstance.isSubProcess()
                && (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
            taskInstance.setFlag(Flag.NO);
            updateTaskInstance(taskInstance);
            return;
        }
        taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
        updateTaskInstance(taskInstance);
    }

    /**
     * retry submitting a task to the db until it succeeds or the retry times are used up
     *
     * @param taskInstance task instance to commit
     * @param commitRetryTimes max commit retry times
     * @param commitInterval sleep interval between two retries
     * @return the committed task instance, or null if every retry failed
     */
    public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) {
        int retryTimes = 1;
        boolean submitDB = false;
        TaskInstance task = null;
        while (retryTimes <= commitRetryTimes) {
            try {
                if (!submitDB) {
                    // submit task to db
                    task = submitTask(taskInstance);
                    if (task != null && task.getId() != 0) {
                        submitDB = true;
                        break;
                    }
                }
                if (!submitDB) {
                    logger.error("task commit to db failed, taskId {} has already retried {} times, please check the database", taskInstance.getId(), retryTimes);
                }
                Thread.sleep(commitInterval);
            } catch (Exception e) {
                logger.error("task commit to mysql failed", e);
            }
            retryTimes += 1;
        }
        return task;
    }

    /**
     * submit task to db
     * submit sub process to command
     *
     * @param taskInstance taskInstance
     * @return task instance
     */
    @Transactional(rollbackFor = Exception.class)
    public TaskInstance submitTask(TaskInstance taskInstance) {
        ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
        logger.info("start submit task : {}, instance id:{}, state: {}",
                taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
        //submit to db
        TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
        if (task == null) {
            logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
                    taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
            return task;
        }
        if (!task.getState().typeIsFinished()) {
            createSubWorkProcess(processInstance, task);
        }
        logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ",
                taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
        return task;
    }

    /**
     * set work process instance map
     * a repeat-running command does not generate a new sub process instance,
     * so an existing map is looked up and reused first;
     * otherwise a new map {parent instance id, task instance id, 0(child instance id)} is created
     *
     * @param parentInstance parentInstance
     * @param parentTask parentTask
     * @return process instance map
     */
    private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
        ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
        if (processMap != null) {
            return processMap;
        }
        if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
            // update current task id to map
            processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
            if (processMap != null) {
                processMap.setParentTaskInstanceId(parentTask.getId());
                updateWorkProcessInstanceMap(processMap);
                return processMap;
            }
        }
        // new task
        processMap = new ProcessInstanceMap();
        processMap.setParentProcessInstanceId(parentInstance.getId());
        processMap.setParentTaskInstanceId(parentTask.getId());
        createWorkProcessInstanceMap(processMap);
        return processMap;
    }

    /**
     * find previous task work process map.
     *
     * @param parentProcessInstance parentProcessInstance
     * @param parentTask parentTask
     * @return process instance map
     */
    private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance, TaskInstance parentTask) {
        Integer preTaskId = 0;
        List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
        for (TaskInstance task : preTaskList) {
            if (task.getName().equals(parentTask.getName())) {
                preTaskId = task.getId();
                ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
                if (map != null) {
                    return map;
                }
            }
        }
        logger.info("sub process instance is not found,parent task:{},parent instance:{}",
                parentTask.getId(), parentProcessInstance.getId());
        return null;
    }

    /**
     * create sub work process command
     *
     * @param parentProcessInstance parentProcessInstance
     * @param task task
     */
    public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
        if (!task.isSubProcess()) {
            return;
        }
        // check whether the sub work flow has already been created before anything else
        ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
        if (null != instanceMap
                && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
            // fault-tolerance recovery does not create a new command when the sub command has already been created
            return;
        }
        instanceMap = setProcessInstanceMap(parentProcessInstance, task);
        ProcessInstance childInstance = null;
        if (instanceMap.getProcessInstanceId() != 0) {
            childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
        }
        Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
        updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode());
        initSubInstanceState(childInstance);
        createCommand(subProcessCommand);
        logger.info("sub process command created: {} ", subProcessCommand);
    }

    /**
     * complement data needs to transform the parent parameters to the child.
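     * the complement date range and the parent's global param values are copied into the
     * sub process command parameter so the child instance runs over the same data window.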
*/ private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) { // set sub work process command String processMapStr = JSONUtils.toJsonString(instanceMap); Map<String, String> cmdParam = JSONUtils.toMap(processMapStr); if (parentProcessInstance.isComplementData()) { Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam()); String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE); String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime); processMapStr = JSONUtils.toJsonString(cmdParam); } if (fatherParams.size() != 0) { cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams)); processMapStr = JSONUtils.toJsonString(cmdParam); } return processMapStr; } public Map<String, String> getGlobalParamMap(String globalParams) { List<Property> propList; Map<String, String> globalParamMap = new HashMap<>(); if (StringUtils.isNotEmpty(globalParams)) { propList = JSONUtils.toList(globalParams, Property.class); globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); } return globalParamMap; } /** * create sub work process command */ public Command createSubProcessCommand(ProcessInstance parentProcessInstance, ProcessInstance childInstance, ProcessInstanceMap instanceMap, TaskInstance task) { CommandType commandType = getSubCommandType(parentProcessInstance, childInstance); Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams()); int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID)); ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId); Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS); List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams()); Map<String, String> fatherParams = new HashMap<>(); if (CollectionUtils.isNotEmpty(allParam)) { for (Property info : allParam) { fatherParams.put(info.getProp(), globalMap.get(info.getProp())); } } String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams); return new Command( commandType, TaskDependType.TASK_POST, parentProcessInstance.getFailureStrategy(), parentProcessInstance.getExecutorId(), processDefinition.getCode(), processParam, parentProcessInstance.getWarningType(), parentProcessInstance.getWarningGroupId(), parentProcessInstance.getScheduleTime(), task.getWorkerGroup(), task.getEnvironmentCode(), parentProcessInstance.getProcessInstancePriority() ); } /** * initialize sub work flow state * child instance state would be initialized when 'recovery from pause/stop/failure' */ private void initSubInstanceState(ProcessInstance childInstance) { if (childInstance != null) { childInstance.setState(ExecutionStatus.RUNNING_EXECUTION); updateProcessInstance(childInstance); } } /** * get sub work flow command type * child instance exist: child command = fatherCommand * child instance not exists: child command = fatherCommand[0] */ private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) { CommandType commandType = parentProcessInstance.getCommandType(); if (childInstance == null) { String fatherHistoryCommand = 
parentProcessInstance.getHistoryCmd(); commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]); } return commandType; } /** * update sub process definition * * @param parentProcessInstance parentProcessInstance * @param childDefinitionCode childDefinitionId */ private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) { ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(), parentProcessInstance.getProcessDefinitionVersion()); ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode); if (childDefinition != null && fatherDefinition != null) { childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId()); processDefineMapper.updateById(childDefinition); } } /** * submit task to mysql * * @param taskInstance taskInstance * @param processInstance processInstance * @return task instance */ public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) { ExecutionStatus processInstanceState = processInstance.getState(); if (taskInstance.getState().typeIsFailure()) { if (taskInstance.isSubProcess()) { taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1); } else { if (processInstanceState != ExecutionStatus.READY_STOP && processInstanceState != ExecutionStatus.READY_PAUSE) { // failure task set invalid taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); // crate new task instance if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) { taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1); } taskInstance.setSubmitTime(null); taskInstance.setStartTime(null); taskInstance.setEndTime(null); taskInstance.setFlag(Flag.YES); taskInstance.setHost(null); taskInstance.setId(0); } } } taskInstance.setExecutorId(processInstance.getExecutorId()); taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority()); taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState)); if (taskInstance.getSubmitTime() == null) { taskInstance.setSubmitTime(new Date()); } if (taskInstance.getFirstSubmitTime() == null) { taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime()); } boolean saveResult = saveTaskInstance(taskInstance); if (!saveResult) { return null; } return taskInstance; } /** * get submit task instance state by the work process state * cannot modify the task state when running/kill/submit success, or this * task instance is already exists in task queue . 
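     * e.g. a task that is already RUNNING_EXECUTION keeps that state even if the
     * process instance has switched to READY_PAUSE in the meantime.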
     * return pause if work process state is ready pause
     * return stop if work process state is ready stop
     * if none of the above is satisfied, return submit success
     *
     * @param taskInstance taskInstance
     * @param processInstanceState processInstanceState
     * @return the state the task should be submitted with
     */
    public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
        ExecutionStatus state = taskInstance.getState();
        // running, delayed or killed
        // the task already exists in task queue
        // return state
        if (
                state == ExecutionStatus.RUNNING_EXECUTION
                        || state == ExecutionStatus.DELAY_EXECUTION
                        || state == ExecutionStatus.KILL
        ) {
            return state;
        }
        // return pause/stop if process instance state is ready pause/stop
        // or return submit success
        if (processInstanceState == ExecutionStatus.READY_PAUSE) {
            state = ExecutionStatus.PAUSE;
        } else if (processInstanceState == ExecutionStatus.READY_STOP
                || !checkProcessStrategy(taskInstance)) {
            state = ExecutionStatus.KILL;
        } else {
            state = ExecutionStatus.SUBMITTED_SUCCESS;
        }
        return state;
    }

    /**
     * check process instance strategy
     *
     * @param taskInstance taskInstance
     * @return check strategy result
     */
    private boolean checkProcessStrategy(TaskInstance taskInstance) {
        ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
        FailureStrategy failureStrategy = processInstance.getFailureStrategy();
        if (failureStrategy == FailureStrategy.CONTINUE) {
            return true;
        }
        List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
        for (TaskInstance task : taskInstances) {
            if (task.getState() == ExecutionStatus.FAILURE
                    && task.getRetryTimes() >= task.getMaxRetryTimes()) {
                return false;
            }
        }
        return true;
    }

    /**
     * insert or update work process instance to database
     *
     * @param processInstance processInstance
     */
    public void saveProcessInstance(ProcessInstance processInstance) {
        if (processInstance == null) {
            logger.error("save error, process instance is null!");
            return;
        }
        if (processInstance.getId() != 0) {
            processInstanceMapper.updateById(processInstance);
        } else {
            processInstanceMapper.insert(processInstance);
        }
    }

    /**
     * insert or update command
     *
     * @param command command
     * @return save command result
     */
    public int saveCommand(Command command) {
        if (command.getId() != 0) {
            return commandMapper.updateById(command);
        } else {
            return commandMapper.insert(command);
        }
    }

    /**
     * insert or update task instance
     *
     * @param taskInstance taskInstance
     * @return save task instance result
     */
    public boolean saveTaskInstance(TaskInstance taskInstance) {
        if (taskInstance.getId() != 0) {
            return updateTaskInstance(taskInstance);
        } else {
            return createTaskInstance(taskInstance);
        }
    }

    /**
     * insert task instance
     *
     * @param taskInstance taskInstance
     * @return create task instance result
     */
    public boolean createTaskInstance(TaskInstance taskInstance) {
        int count = taskInstanceMapper.insert(taskInstance);
        return count > 0;
    }

    /**
     * update task instance
     *
     * @param taskInstance taskInstance
     * @return update task instance result
     */
    public boolean updateTaskInstance(TaskInstance taskInstance) {
        int count = taskInstanceMapper.updateById(taskInstance);
        return count > 0;
    }

    /**
     * find task instance by id
     *
     * @param taskId task id
     * @return task instance
     */
    public TaskInstance findTaskInstanceById(Integer taskId) {
        return taskInstanceMapper.selectById(taskId);
    }

    /**
     * package task instance, associate processInstance and processDefine
     *
     * @param taskInstId taskInstId
     * @return task instance
     */
    public
TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) { // get task instance TaskInstance taskInstance = findTaskInstanceById(taskInstId); if (taskInstance == null) { return null; } // get process instance ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId()); // get process define ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); taskInstance.setProcessInstance(processInstance); taskInstance.setProcessDefine(processDefine); TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); taskInstance.setTaskDefine(taskDefinition); return taskInstance; } /** * get id list by task state * * @param instanceId instanceId * @param state state * @return task instance states */ public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) { return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal()); } /** * find valid task list by process definition id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES); } /** * find previous task list by work process id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO); } /** * update work process instance map * * @param processInstanceMap processInstanceMap * @return update process instance result */ public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { return processInstanceMapMapper.updateById(processInstanceMap); } /** * create work process instance map * * @param processInstanceMap processInstanceMap * @return create process instance result */ public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { int count = 0; if (processInstanceMap != null) { return processInstanceMapMapper.insert(processInstanceMap); } return count; } /** * find work process map by parent process id and parent task id. 
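     * the map records which task of the parent process spawned which sub process instance.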
* * @param parentWorkProcessId parentWorkProcessId * @param parentTaskId parentTaskId * @return process instance map */ public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) { return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId); } /** * delete work process map by parent process id * * @param parentWorkProcessId parentWorkProcessId * @return delete process map result */ public int deleteWorkProcessMapByParentId(int parentWorkProcessId) { return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId); } /** * find sub process instance * * @param parentProcessId parentProcessId * @param parentTaskId parentTaskId * @return process instance */ public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId()); return processInstance; } /** * find parent process instance * * @param subProcessId subProcessId * @return process instance */ public ProcessInstance findParentProcessInstance(Integer subProcessId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); return processInstance; } /** * change task state * * @param state state * @param startTime startTime * @param host host * @param executePath executePath * @param logPath logPath * @param taskInstId taskInstId */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host, String executePath, String logPath, int taskInstId) { taskInstance.setState(state); taskInstance.setStartTime(startTime); taskInstance.setHost(host); taskInstance.setExecutePath(executePath); taskInstance.setLogPath(logPath); saveTaskInstance(taskInstance); } /** * update process instance * * @param processInstance processInstance * @return update process instance result */ public int updateProcessInstance(ProcessInstance processInstance) { return processInstanceMapper.updateById(processInstance); } /** * change task state * * @param state state * @param endTime endTime * @param taskInstId taskInstId * @param varPool varPool */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date endTime, int processId, String appIds, int taskInstId, String varPool) { taskInstance.setPid(processId); taskInstance.setAppLink(appIds); taskInstance.setState(state); taskInstance.setEndTime(endTime); taskInstance.setVarPool(varPool); changeOutParam(taskInstance); saveTaskInstance(taskInstance); } /** * for show in page of taskInstance * * @param taskInstance */ public void changeOutParam(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getVarPool())) { return; } List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class); if (CollectionUtils.isEmpty(properties)) { return; } //if the result more than one line,just get the first . 
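        // write the OUT-direction values from the var pool back into the task's
        // localParams so that they are visible on the task instance page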
Map<String, Object> taskParams = JSONUtils.toMap(taskInstance.getTaskParams(), String.class, Object.class); Object localParams = taskParams.get(LOCAL_PARAMS); if (localParams == null) { return; } List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> outProperty = new HashMap<>(); for (Property info : properties) { if (info.getDirect() == Direct.OUT) { outProperty.put(info.getProp(), info.getValue()); } } for (Property info : allParam) { if (info.getDirect() == Direct.OUT) { String paramName = info.getProp(); info.setValue(outProperty.get(paramName)); } } taskParams.put(LOCAL_PARAMS, allParam); taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams)); } /** * convert integer list to string list * * @param intList intList * @return string list */ public List<String> convertIntListToString(List<Integer> intList) { if (intList == null) { return new ArrayList<>(); } List<String> result = new ArrayList<>(intList.size()); for (Integer intVar : intList) { result.add(String.valueOf(intVar)); } return result; } /** * query schedule by id * * @param id id * @return schedule */ public Schedule querySchedule(int id) { return scheduleMapper.selectById(id); } /** * query Schedule by processDefinitionCode * * @param processDefinitionCode processDefinitionCode * @see Schedule */ public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) { return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode); } /** * query need failover process instance * * @param host host * @return process instance list */ public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) { return processInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * process need failover process instance * * @param processInstance processInstance */ @Transactional(rollbackFor = RuntimeException.class) public void processNeedFailoverProcessInstances(ProcessInstance processInstance) { //1 update processInstance host is null processInstance.setHost(Constants.NULL); processInstanceMapper.updateById(processInstance); ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); //2 insert into recover command Command cmd = new Command(); cmd.setProcessDefinitionCode(processDefinition.getCode()); cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId())); cmd.setExecutorId(processInstance.getExecutorId()); cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS); createCommand(cmd); } /** * query all need failover task instances by host * * @param host host * @return task instance list */ public List<TaskInstance> queryNeedFailoverTaskInstances(String host) { return taskInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * find data source by id * * @param id id * @return datasource */ public DataSource findDataSourceById(int id) { return dataSourceMapper.selectById(id); } /** * update process instance state by id * * @param processInstanceId processInstanceId * @param executionStatus executionStatus * @return update process result */ public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) { ProcessInstance instance = processInstanceMapper.selectById(processInstanceId); instance.setState(executionStatus); return processInstanceMapper.updateById(instance); } /** * find process 
instance by the task id * * @param taskId taskId * @return process instance */ public ProcessInstance findProcessInstanceByTaskId(int taskId) { TaskInstance taskInstance = taskInstanceMapper.selectById(taskId); if (taskInstance != null) { return processInstanceMapper.selectById(taskInstance.getProcessInstanceId()); } return null; } /** * find udf function list by id list string * * @param ids ids * @return udf function list */ public List<UdfFunc> queryUdfFunListByIds(int[] ids) { return udfFuncMapper.queryUdfByIdStr(ids, null); } /** * find tenant code by resource name * * @param resName resource name * @param resourceType resource type * @return tenant code */ public String queryTenantCodeByResName(String resName, ResourceType resourceType) { // in order to query tenant code successful although the version is older String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName); List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { return StringUtils.EMPTY; } int userId = resourceList.get(0).getUserId(); User user = userMapper.selectById(userId); if (Objects.isNull(user)) { return StringUtils.EMPTY; } Tenant tenant = tenantMapper.selectById(user.getTenantId()); if (Objects.isNull(tenant)) { return StringUtils.EMPTY; } return tenant.getTenantCode(); } /** * find schedule list by process define codes. * * @param codes codes * @return schedule list */ public List<Schedule> selectAllByProcessDefineCode(long[] codes) { return scheduleMapper.selectAllByProcessDefineArray(codes); } /** * find last scheduler process instance in the date interval * * @param definitionCode definitionCode * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastSchedulerProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last manual process instance interval * * @param definitionCode process definition code * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastManualProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last running process instance * * @param definitionCode process definition code * @param startTime start time * @param endTime end time * @return process instance */ public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) { return processInstanceMapper.queryLastRunningProcess(definitionCode, startTime, endTime, stateArray); } /** * query user queue by process instance id * * @param processInstanceId processInstanceId * @return queue */ public String queryUserQueueByProcessInstanceId(int processInstanceId) { String queue = ""; ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId); if (processInstance == null) { return queue; } User executor = userMapper.selectById(processInstance.getExecutorId()); if (executor != null) { queue = executor.getQueue(); } return queue; } /** * query project name and user name by processInstanceId. 
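     * the returned ProjectUser bundles the project name and the related user name in one object.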
* * @param processInstanceId processInstanceId * @return projectName and userName */ public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) { return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId); } /** * get task worker group * * @param taskInstance taskInstance * @return workerGroupId */ public String getTaskWorkerGroup(TaskInstance taskInstance) { String workerGroup = taskInstance.getWorkerGroup(); if (StringUtils.isNotBlank(workerGroup)) { return workerGroup; } int processInstanceId = taskInstance.getProcessInstanceId(); ProcessInstance processInstance = findProcessInstanceById(processInstanceId); if (processInstance != null) { return processInstance.getWorkerGroup(); } logger.info("task : {} will use default worker group", taskInstance.getId()); return Constants.DEFAULT_WORKER_GROUP; } /** * get have perm project list * * @param userId userId * @return project list */ public List<Project> getProjectListHavePerm(int userId) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId); List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId); if (createProjects == null) { createProjects = new ArrayList<>(); } if (authedProjects != null) { createProjects.addAll(authedProjects); } return createProjects; } /** * list unauthorized udf function * * @param userId user id * @param needChecks data source id array * @return unauthorized udf function list */ public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) { List<T> resultList = new ArrayList<>(); if (Objects.nonNull(needChecks) && needChecks.length > 0) { Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks)); switch (authorizationType) { case RESOURCE_FILE_ID: case UDF_FILE: List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks); addAuthorizedResources(ownUdfResources, userId); Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet()); originResSet.removeAll(authorizedResourceFiles); break; case RESOURCE_FILE_NAME: List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks); addAuthorizedResources(ownResources, userId); Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet()); originResSet.removeAll(authorizedResources); break; case DATASOURCE: Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet()); originResSet.removeAll(authorizedDatasources); break; case UDF: Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet()); originResSet.removeAll(authorizedUdfs); break; default: break; } resultList.addAll(originResSet); } return resultList; } /** * get user by user id * * @param userId user id * @return User */ public User getUserById(int userId) { return userMapper.selectById(userId); } /** * get resource by resource id * * @param resourceId resource id * @return Resource */ public Resource getResourceById(int resourceId) { return resourceMapper.selectById(resourceId); } /** * list resources by ids * * @param resIds resIds * @return resource list */ public List<Resource> listResourceByIds(Integer[] resIds) { return resourceMapper.listResourceByIds(resIds); } /** * format task app id in task instance */ public String formatTaskAppId(TaskInstance taskInstance) { 
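        // app id format: <processDefinitionId>_<processInstanceId>_<taskInstanceId>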
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId()); if (processInstance == null) { return ""; } ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (definition == null) { return ""; } return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId()); } /** * switch process definition version to process definition log version */ public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) { if (null == processDefinition || null == processDefinitionLog) { return Constants.DEFINITION_FAILURE; } processDefinitionLog.setId(processDefinition.getId()); processDefinitionLog.setReleaseState(ReleaseState.OFFLINE); processDefinitionLog.setFlag(Flag.YES); int result = processDefineMapper.updateById(processDefinitionLog); if (result > 0) { result = switchProcessTaskRelationVersion(processDefinition); if (result <= 0) { return Constants.DEFINITION_FAILURE; } } return result; } public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode()); } List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); return processTaskRelationMapper.batchInsert(processTaskRelationLogList); } /** * get resource ids * * @param taskDefinition taskDefinition * @return resource ids */ public String getResourceIds(TaskDefinition taskDefinition) { Set<Integer> resourceIds = null; AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams()); if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) { resourceIds = params.getResourceFilesList(). 
stream() .filter(t -> t.getId() != 0) .map(ResourceInfo::getId) .collect(Collectors.toSet()); } if (CollectionUtils.isEmpty(resourceIds)) { return StringUtils.EMPTY; } return StringUtils.join(resourceIds, ","); } public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) { Date now = new Date(); List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>(); List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperateTime(now); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog)); if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) { TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper .queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion()); if (definitionCodeAndVersion != null) { if (!taskDefinitionLog.equals(definitionCodeAndVersion)) { taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId()); Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode()); taskDefinitionLog.setVersion(version + 1); taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime()); updateTaskDefinitionLogs.add(taskDefinitionLog); } continue; } } taskDefinitionLog.setUserId(operator.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); if (taskDefinitionLog.getCode() == 0) { try { taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); return Constants.DEFINITION_FAILURE; } } newTaskDefinitionLogs.add(taskDefinitionLog); } for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) { TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode()); if (task == null) { newTaskDefinitionLogs.add(taskDefinitionToUpdate); } else { int insert = taskDefinitionLogMapper.insert(taskDefinitionToUpdate); taskDefinitionToUpdate.setId(task.getId()); int update = taskDefinitionMapper.updateById(taskDefinitionToUpdate); return update & insert; } } if (!newTaskDefinitionLogs.isEmpty()) { int insert = taskDefinitionMapper.batchInsert(newTaskDefinitionLogs); int logInsert = taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs); return logInsert & insert; } return Constants.EXIT_CODE_SUCCESS; } /** * save processDefinition (including create or update processDefinition) */ public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) { ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1; processDefinitionLog.setVersion(insertVersion); processDefinitionLog.setReleaseState(isFromProcessDefine ? 
ReleaseState.OFFLINE : ReleaseState.ONLINE); processDefinitionLog.setOperator(operator.getId()); processDefinitionLog.setOperateTime(processDefinition.getUpdateTime()); int insertLog = processDefineLogMapper.insert(processDefinitionLog); int result; if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { processDefinitionLog.setId(processDefinition.getId()); result = processDefineMapper.updateById(processDefinitionLog); } return (insertLog & result) > 0 ? insertVersion : 0; } /** * save task relations */ public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion, List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null; if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) { taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog)); } Date now = new Date(); for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { processTaskRelationLog.setProjectCode(projectCode); processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode); processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion); if (taskDefinitionLogMap != null) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode()); if (taskDefinitionLog != null) { processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion()); } processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion()); } processTaskRelationLog.setCreateTime(now); processTaskRelationLog.setUpdateTime(now); processTaskRelationLog.setOperator(operator.getId()); processTaskRelationLog.setOperateTime(now); } List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet()); Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet()); if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) { return Constants.EXIT_CODE_SUCCESS; } processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode); } int result = processTaskRelationMapper.batchInsert(taskRelationList); int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList); return (result & resultLog) > 0 ? 
Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE; } public boolean isTaskOnline(long taskCode) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode); if (!processTaskRelationList.isEmpty()) { Set<Long> processDefinitionCodes = processTaskRelationList .stream() .map(ProcessTaskRelation::getProcessDefinitionCode) .collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes); // check process definition is already online for (ProcessDefinition processDefinition : processDefinitionList) { if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { return true; } } } return false; } /** * Generate the DAG Graph based on the process definition id * * @param processDefinition process definition * @return dag graph */ public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList()); ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations)); // Generate concrete Dag to be executed return DagHelper.buildDagGraph(processDag); } /** * generate DagData */ public DagData genDagData(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations); List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream() .map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class)) .collect(Collectors.toList()); return new DagData(processDefinition, processTaskRelations, taskDefinitions); } public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) { Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion())); } if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); } /** * find task definition by code and version */ public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) { return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion); } /** * find process task relation list by projectCode and processDefinitionCode */ public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) { return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); } /** * add authorized resources * * @param ownResources own resources * @param userId userId */ private void addAuthorizedResources(List<Resource> ownResources, int userId) { List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7); List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? 
resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>(); ownResources.addAll(relationResources); } /** * Use temporarily before refactoring taskNode */ public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, List<Long>> taskCodeMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> { if (v == null) { v = new ArrayList<>(); } if (processTaskRelation.getPreTaskCode() != 0L) { v.add(processTaskRelation.getPreTaskCode()); } return v; }); } if (CollectionUtils.isEmpty(taskDefinitionLogs)) { taskDefinitionLogs = genTaskDefineList(taskRelationList); } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); List<TaskNode> taskNodeList = new ArrayList<>(); for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey()); if (taskDefinitionLog != null) { TaskNode taskNode = new TaskNode(); taskNode.setCode(taskDefinitionLog.getCode()); taskNode.setVersion(taskDefinitionLog.getVersion()); taskNode.setName(taskDefinitionLog.getName()); taskNode.setDesc(taskDefinitionLog.getDescription()); taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase()); taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN); taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes()); taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval()); Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams()); taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT))); taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT))); taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE))); taskParamsMap.remove(Constants.CONDITION_RESULT); taskParamsMap.remove(Constants.DEPENDENCE); taskNode.setParams(JSONUtils.toJsonString(taskParamsMap)); taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority()); taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup()); taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode()); taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN, taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout()))); taskNode.setDelayTime(taskDefinitionLog.getDelayTime()); taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getName).collect(Collectors.toList()))); taskNodeList.add(taskNode); } } return taskNodeList; } public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) { HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>(); //find sub tasks ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId); if (processInstanceMap == null) { return processTaskMap; } ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId()); if (fatherProcess != null) { 
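            // pair the parent process instance with the task that launched this sub process,
            // so the caller can notify the parent when the sub process state changes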
processTaskMap.put(fatherProcess, fatherTask); } return processTaskMap; } }
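The `transformTask` method above first folds the relation list into a map from each post-task code to its upstream codes before resolving task definitions. A minimal standalone sketch of that fold, using invented codes instead of the real mappers, may make the shape of the data clearer:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RelationFoldSketch {
    public static void main(String[] args) {
        // (preTaskCode, postTaskCode) pairs; 0 marks a task with no upstream
        long[][] relations = {{0L, 2L}, {2L, 3L}};
        Map<Long, List<Long>> taskCodeMap = new HashMap<>();
        for (long[] r : relations) {
            taskCodeMap.compute(r[1], (k, v) -> {
                if (v == null) {
                    v = new ArrayList<>();
                }
                if (r[0] != 0L) {
                    v.add(r[0]); // only record a real upstream, never the 0 sentinel
                }
                return v;
            });
        }
        System.out.println(taskCodeMap); // {2=[], 3=[2]}
    }
}
```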
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,302
[Bug] [API] Add a new task to an existing workflow, and the task is lost
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When a new task is added to an existing workflow, the task is lost. ### What you expected to happen Tasks are added normally. ### How to reproduce Update the process definition, then add a task. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6302
https://github.com/apache/dolphinscheduler/pull/6307
b8ed7e6f2d38aefdf4902253416c91e2cc02f8eb
0b53adeb07df72a9b888566092642a77dfccf65d
"2021-09-23T02:56:34Z"
java
"2021-09-23T07:06:49Z"
dolphinscheduler-service/src/test/java/org/apache/dolphinscheduler/service/process/ProcessServiceTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import static org.mockito.ArgumentMatchers.any; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.quartz.cron.CronUtilsTest; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import 
com.fasterxml.jackson.databind.JsonNode; /** * process service test */ @RunWith(MockitoJUnitRunner.class) public class ProcessServiceTest { private static final Logger logger = LoggerFactory.getLogger(CronUtilsTest.class); @InjectMocks private ProcessService processService; @Mock private CommandMapper commandMapper; @Mock private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Mock private ErrorCommandMapper errorCommandMapper; @Mock private ProcessDefinitionMapper processDefineMapper; @Mock private ProcessInstanceMapper processInstanceMapper; @Mock private UserMapper userMapper; @Mock private TaskInstanceMapper taskInstanceMapper; @Mock private TaskDefinitionLogMapper taskDefinitionLogMapper; @Mock private ProcessTaskRelationMapper processTaskRelationMapper; @Mock private ProcessDefinitionLogMapper processDefineLogMapper; @Test public void testCreateSubCommand() { ProcessInstance parentInstance = new ProcessInstance(); parentInstance.setWarningType(WarningType.SUCCESS); parentInstance.setWarningGroupId(0); TaskInstance task = new TaskInstance(); task.setTaskParams("{\"processDefinitionId\":100}}"); task.setId(10); task.setTaskCode(1L); task.setTaskDefinitionVersion(1); ProcessInstance childInstance = null; ProcessInstanceMap instanceMap = new ProcessInstanceMap(); instanceMap.setParentProcessInstanceId(1); instanceMap.setParentTaskInstanceId(10); Command command; //father history: start; child null == command type: start parentInstance.setHistoryCmd("START_PROCESS"); parentInstance.setCommandType(CommandType.START_PROCESS); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setCode(1L); Mockito.when(processDefineMapper.queryByDefineId(100)).thenReturn(processDefinition); command = processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.START_PROCESS, command.getCommandType()); //father history: start,start failure; child null == command type: start parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); parentInstance.setHistoryCmd("START_PROCESS,START_FAILURE_TASK_PROCESS"); command = processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.START_PROCESS, command.getCommandType()); //father history: scheduler,start failure; child null == command type: scheduler parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); parentInstance.setHistoryCmd("SCHEDULER,START_FAILURE_TASK_PROCESS"); command = processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.SCHEDULER, command.getCommandType()); //father history: complement,start failure; child null == command type: complement String startString = "2020-01-01 00:00:00"; String endString = "2020-01-10 00:00:00"; parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); parentInstance.setHistoryCmd("COMPLEMENT_DATA,START_FAILURE_TASK_PROCESS"); Map<String, String> complementMap = new HashMap<>(); complementMap.put(Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE, startString); complementMap.put(Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE, endString); parentInstance.setCommandParam(JSONUtils.toJsonString(complementMap)); command = processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.COMPLEMENT_DATA, command.getCommandType()); JsonNode complementDate = JSONUtils.parseObject(command.getCommandParam()); Date 
start = DateUtils.stringToDate(complementDate.get(Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE).asText()); Date end = DateUtils.stringToDate(complementDate.get(Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE).asText()); Assert.assertEquals(startString, DateUtils.dateToString(start)); Assert.assertEquals(endString, DateUtils.dateToString(end)); //father history: start,failure,start failure; child not null == command type: start failure childInstance = new ProcessInstance(); parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); parentInstance.setHistoryCmd("START_PROCESS,START_FAILURE_TASK_PROCESS"); command = processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.START_FAILURE_TASK_PROCESS, command.getCommandType()); } @Test public void testVerifyIsNeedCreateCommand() { List<Command> commands = new ArrayList<>(); Command command = new Command(); command.setCommandType(CommandType.REPEAT_RUNNING); command.setCommandParam("{\"" + CMD_PARAM_RECOVER_PROCESS_ID_STRING + "\":\"111\"}"); commands.add(command); Mockito.when(commandMapper.selectList(null)).thenReturn(commands); Assert.assertFalse(processService.verifyIsNeedCreateCommand(command)); Command command1 = new Command(); command1.setCommandType(CommandType.REPEAT_RUNNING); command1.setCommandParam("{\"" + CMD_PARAM_RECOVER_PROCESS_ID_STRING + "\":\"222\"}"); Assert.assertTrue(processService.verifyIsNeedCreateCommand(command1)); Command command2 = new Command(); command2.setCommandType(CommandType.PAUSE); Assert.assertTrue(processService.verifyIsNeedCreateCommand(command2)); } @Test public void testCreateRecoveryWaitingThreadCommand() { int id = 123; Mockito.when(commandMapper.deleteById(id)).thenReturn(1); ProcessInstance subProcessInstance = new ProcessInstance(); subProcessInstance.setIsSubProcess(Flag.YES); Command originCommand = new Command(); originCommand.setId(id); processService.createRecoveryWaitingThreadCommand(originCommand, subProcessInstance); ProcessInstance processInstance = new ProcessInstance(); processInstance.setId(111); processService.createRecoveryWaitingThreadCommand(null, subProcessInstance); Command recoverCommand = new Command(); recoverCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD); processService.createRecoveryWaitingThreadCommand(recoverCommand, subProcessInstance); Command repeatRunningCommand = new Command(); recoverCommand.setCommandType(CommandType.REPEAT_RUNNING); processService.createRecoveryWaitingThreadCommand(repeatRunningCommand, subProcessInstance); ProcessInstance subProcessInstance2 = new ProcessInstance(); subProcessInstance2.setId(111); subProcessInstance2.setIsSubProcess(Flag.NO); processService.createRecoveryWaitingThreadCommand(repeatRunningCommand, subProcessInstance2); } @Test public void testHandleCommand() { //cannot construct process instance, return null; String host = "127.0.0.1"; int validThreadNum = 1; Command command = new Command(); command.setProcessDefinitionCode(222); command.setCommandType(CommandType.REPEAT_RUNNING); command.setCommandParam("{\"" + CMD_PARAM_RECOVER_PROCESS_ID_STRING + "\":\"111\",\"" + CMD_PARAM_SUB_PROCESS_DEFINE_ID + "\":\"222\"}"); Assert.assertNull(processService.handleCommand(logger, host, validThreadNum, command)); //there is not enough thread for this command Command command1 = new Command(); command1.setProcessDefinitionCode(123); command1.setCommandParam("{\"ProcessInstanceId\":222}"); command1.setCommandType(CommandType.START_PROCESS); ProcessDefinition 
processDefinition = new ProcessDefinition(); processDefinition.setId(123); processDefinition.setName("test"); processDefinition.setVersion(1); processDefinition.setCode(11L); processDefinition.setGlobalParams("[{\"prop\":\"startParam1\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"\"}]"); ProcessInstance processInstance = new ProcessInstance(); processInstance.setId(222); processInstance.setProcessDefinitionCode(11L); processInstance.setProcessDefinitionVersion(1); Mockito.when(processDefineMapper.queryByCode(command1.getProcessDefinitionCode())).thenReturn(processDefinition); Mockito.when(processDefineLogMapper.queryByDefinitionCodeAndVersion(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion())).thenReturn(new ProcessDefinitionLog(processDefinition)); Mockito.when(processInstanceMapper.queryDetailById(222)).thenReturn(processInstance); Assert.assertNotNull(processService.handleCommand(logger, host, validThreadNum, command1)); Command command2 = new Command(); command2.setCommandParam("{\"ProcessInstanceId\":222,\"StartNodeIdList\":\"n1,n2\"}"); command2.setProcessDefinitionCode(123); command2.setCommandType(CommandType.RECOVER_SUSPENDED_PROCESS); Assert.assertNotNull(processService.handleCommand(logger, host, validThreadNum, command2)); Command command3 = new Command(); command3.setProcessDefinitionCode(123); command3.setCommandParam("{\"WaitingThreadInstanceId\":222}"); command3.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); Assert.assertNotNull(processService.handleCommand(logger, host, validThreadNum, command3)); Command command4 = new Command(); command4.setProcessDefinitionCode(123); command4.setCommandParam("{\"WaitingThreadInstanceId\":222,\"StartNodeIdList\":\"n1,n2\"}"); command4.setCommandType(CommandType.REPEAT_RUNNING); Assert.assertNotNull(processService.handleCommand(logger, host, validThreadNum, command4)); Command command5 = new Command(); command5.setProcessDefinitionCode(123); HashMap<String, String> startParams = new HashMap<>(); startParams.put("startParam1", "testStartParam1"); HashMap<String, String> commandParams = new HashMap<>(); commandParams.put(CMD_PARAM_START_PARAMS, JSONUtils.toJsonString(startParams)); command5.setCommandParam(JSONUtils.toJsonString(commandParams)); command5.setCommandType(CommandType.START_PROCESS); ProcessInstance processInstance1 = processService.handleCommand(logger, host, validThreadNum, command5); Assert.assertTrue(processInstance1.getGlobalParams().contains("\"testStartParam1\"")); } @Test public void testGetUserById() { User user = new User(); user.setId(123); Mockito.when(userMapper.selectById(123)).thenReturn(user); Assert.assertEquals(user, processService.getUserById(123)); } @Test public void testFormatTaskAppId() { TaskInstance taskInstance = new TaskInstance(); taskInstance.setId(333); taskInstance.setProcessInstanceId(222); Mockito.when(processService.findProcessInstanceById(taskInstance.getProcessInstanceId())).thenReturn(null); Assert.assertEquals("", processService.formatTaskAppId(taskInstance)); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setId(111); ProcessInstance processInstance = new ProcessInstance(); processInstance.setId(222); processInstance.setProcessDefinitionVersion(1); processInstance.setProcessDefinitionCode(1L); Mockito.when(processService.findProcessInstanceById(taskInstance.getProcessInstanceId())).thenReturn(processInstance); Assert.assertEquals("", processService.formatTaskAppId(taskInstance)); } @Test public void 
testRecurseFindSubProcessId() { int parentProcessDefineId = 1; long parentProcessDefineCode = 1L; int parentProcessDefineVersion = 1; ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setCode(parentProcessDefineCode); processDefinition.setVersion(parentProcessDefineVersion); Mockito.when(processDefineMapper.selectById(parentProcessDefineId)).thenReturn(processDefinition); long postTaskCode = 2L; int postTaskVersion = 2; List<ProcessTaskRelationLog> relationLogList = new ArrayList<>(); ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(); processTaskRelationLog.setPostTaskCode(postTaskCode); processTaskRelationLog.setPostTaskVersion(postTaskVersion); relationLogList.add(processTaskRelationLog); Mockito.when(processTaskRelationLogMapper.queryByProcessCodeAndVersion(parentProcessDefineCode , parentProcessDefineVersion)).thenReturn(relationLogList); List<TaskDefinitionLog> taskDefinitionLogs = new ArrayList<>(); TaskDefinitionLog taskDefinitionLog1 = new TaskDefinitionLog(); taskDefinitionLog1.setTaskParams("{\"processDefinitionId\": 123}"); taskDefinitionLogs.add(taskDefinitionLog1); Mockito.when(taskDefinitionLogMapper.queryByTaskDefinitions(Mockito.anySet())).thenReturn(taskDefinitionLogs); List<Integer> ids = new ArrayList<>(); processService.recurseFindSubProcessId(parentProcessDefineId, ids); Assert.assertEquals(1, ids.size()); } @Test public void testSwitchVersion() { ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setCode(1L); processDefinition.setProjectCode(1L); processDefinition.setId(123); processDefinition.setName("test"); processDefinition.setVersion(1); ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(); processDefinitionLog.setCode(1L); processDefinitionLog.setVersion(2); Assert.assertEquals(0, processService.switchVersion(processDefinition, processDefinitionLog)); } @Test public void testGenDagGraph() { ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setCode(1L); processDefinition.setId(123); processDefinition.setName("test"); processDefinition.setVersion(1); processDefinition.setCode(11L); ProcessTaskRelation processTaskRelation = new ProcessTaskRelation(); processTaskRelation.setName("def 1"); processTaskRelation.setProcessDefinitionVersion(1); processTaskRelation.setProjectCode(1L); processTaskRelation.setProcessDefinitionCode(1L); processTaskRelation.setPostTaskCode(3L); processTaskRelation.setPreTaskCode(2L); processTaskRelation.setUpdateTime(new Date()); processTaskRelation.setCreateTime(new Date()); List<ProcessTaskRelation> list = new ArrayList<>(); list.add(processTaskRelation); TaskDefinitionLog taskDefinition = new TaskDefinitionLog(); taskDefinition.setCode(3L); taskDefinition.setName("1-test"); taskDefinition.setProjectCode(1L); taskDefinition.setTaskType(TaskType.SHELL.getDesc()); taskDefinition.setUserId(1); taskDefinition.setVersion(2); taskDefinition.setCreateTime(new Date()); taskDefinition.setUpdateTime(new Date()); TaskDefinitionLog td2 = new TaskDefinitionLog(); td2.setCode(2L); td2.setName("unit-test"); td2.setProjectCode(1L); td2.setTaskType(TaskType.SHELL.getDesc()); td2.setUserId(1); td2.setVersion(1); td2.setCreateTime(new Date()); td2.setUpdateTime(new Date()); List<TaskDefinitionLog> taskDefinitionLogs = new ArrayList<>(); taskDefinitionLogs.add(taskDefinition); taskDefinitionLogs.add(td2); Mockito.when(taskDefinitionLogMapper.queryByTaskDefinitions(any())).thenReturn(taskDefinitionLogs); 
Mockito.when(processTaskRelationMapper.queryByProcessCode(Mockito.anyLong(), Mockito.anyLong())).thenReturn(list); DAG<String, TaskNode, TaskNodeRelation> stringTaskNodeTaskNodeRelationDAG = processService.genDagGraph(processDefinition); Assert.assertEquals(1, stringTaskNodeTaskNodeRelationDAG.getNodesCount()); } @Test public void testCreateCommand() { Command command = new Command(); command.setProcessDefinitionCode(123); command.setCommandParam("{\"ProcessInstanceId\":222}"); command.setCommandType(CommandType.START_PROCESS); int mockResult = 1; Mockito.when(commandMapper.insert(command)).thenReturn(mockResult); int exeMethodResult = processService.createCommand(command); Assert.assertEquals(mockResult, exeMethodResult); Mockito.verify(commandMapper, Mockito.times(1)).insert(command); } @Test public void testChangeOutParam() { TaskInstance taskInstance = new TaskInstance(); taskInstance.setProcessInstanceId(62); ProcessInstance processInstance = new ProcessInstance(); processInstance.setId(62); taskInstance.setVarPool("[{\"direct\":\"OUT\",\"prop\":\"test1\",\"type\":\"VARCHAR\",\"value\":\"\"}]"); taskInstance.setTaskParams("{\"type\":\"MYSQL\",\"datasource\":1,\"sql\":\"select id from tb_test limit 1\"," + "\"udfs\":\"\",\"sqlType\":\"0\",\"sendEmail\":false,\"displayRows\":10,\"title\":\"\"," + "\"groupId\":null,\"localParams\":[{\"prop\":\"test1\",\"direct\":\"OUT\",\"type\":\"VARCHAR\",\"value\":\"12\"}]," + "\"connParams\":\"\",\"preStatements\":[],\"postStatements\":[],\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"]," + "\\\"failedNode\\\":[\\\"\\\"]}\",\"dependence\":\"{}\"}"); processService.changeOutParam(taskInstance); } }
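The test class above does not yet cover the scenario from issue #6302. A regression test for it might look roughly like the following sketch (the relation values and the test name are invented; it only asserts that a newly added head task, i.e. one whose `preTaskCode` is 0, still surfaces through `genTaskDefineList`):

```java
@Test
public void testGenTaskDefineListKeepsNewlyAddedTask() {
    // a task just added to an existing workflow: it has no upstream yet
    ProcessTaskRelation relation = new ProcessTaskRelation();
    relation.setPreTaskCode(0L);
    relation.setPostTaskCode(4L); // hypothetical code of the new task
    relation.setPostTaskVersion(1);
    List<ProcessTaskRelation> relations = new ArrayList<>();
    relations.add(relation);

    TaskDefinitionLog newTask = new TaskDefinitionLog();
    newTask.setCode(4L);
    List<TaskDefinitionLog> mapperResult = new ArrayList<>();
    mapperResult.add(newTask);
    Mockito.when(taskDefinitionLogMapper.queryByTaskDefinitions(Mockito.anySet()))
            .thenReturn(mapperResult);

    List<TaskDefinitionLog> result = processService.genTaskDefineList(relations);
    Assert.assertEquals(1, result.size());
    Assert.assertEquals(4L, result.get(0).getCode());
}
```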
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,306
[Bug] [Worker] task log path generated error.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The task log path is generated incorrectly. ### What you expected to happen The task log path stored in the database should be the same as the one the worker uses. ### How to reproduce Run a process definition; you will find that the shell task's log path in the database differs from the worker's. worker log-path: logs/definition_id/instance_id/task_instance_id.log db-path: /logs/definition_code/instance_code/task_instance_id.log ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6306
https://github.com/apache/dolphinscheduler/pull/6311
0b53adeb07df72a9b888566092642a77dfccf65d
163a0696a4ee71a44e02054caef9b78387a9e3b2
"2021-09-23T06:09:23Z"
java
"2021-09-23T08:51:58Z"
dolphinscheduler-spi/src/main/java/org/apache/dolphinscheduler/spi/task/TaskConstants.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.spi.task; public class TaskConstants { private TaskConstants() { throw new IllegalStateException("Utility class"); } public static final String APPLICATION_REGEX = "application_\\d+_\\d+"; /** * string false */ public static final String STRING_FALSE = "false"; /** * exit code kill */ public static final int EXIT_CODE_KILL = 137; public static final String PID = "pid"; /** * comma , */ public static final String COMMA = ","; /** * slash / */ public static final String SLASH = "/"; /** * COLON : */ public static final String COLON = ":"; /** * SPACE " " */ public static final String SPACE = " "; /** * SINGLE_SLASH / */ public static final String SINGLE_SLASH = "/"; /** * DOUBLE_SLASH // */ public static final String DOUBLE_SLASH = "//"; /** * SINGLE_QUOTES "'" */ public static final String SINGLE_QUOTES = "'"; /** * DOUBLE_QUOTES "\"" */ public static final String DOUBLE_QUOTES = "\""; /** * SEMICOLON ; */ public static final String SEMICOLON = ";"; /** * EQUAL SIGN */ public static final String EQUAL_SIGN = "="; /** * AT SIGN */ public static final String AT_SIGN = "@"; /** * sleep time */ public static final int SLEEP_TIME_MILLIS = 1000; /** * exit code failure */ public static final int EXIT_CODE_FAILURE = -1; /** * exit code success */ public static final int EXIT_CODE_SUCCESS = 0; public static final String SH = "sh"; /** * default log cache rows num,output when reach the number */ public static final int DEFAULT_LOG_ROWS_NUM = 4 * 16; /** * log flush interval?output when reach the interval */ public static final int DEFAULT_LOG_FLUSH_INTERVAL = 1000; /** * pstree, get pud and sub pid */ public static final String PSTREE = "pstree"; public static final String RWXR_XR_X = "rwxr-xr-x"; /** * task log info format */ public static final String TASK_LOG_INFO_FORMAT = "TaskLogInfo-%s"; /** * date format of yyyyMMdd */ public static final String PARAMETER_FORMAT_DATE = "yyyyMMdd"; /** * date format of yyyyMMddHHmmss */ public static final String PARAMETER_FORMAT_TIME = "yyyyMMddHHmmss"; /** * new * schedule time */ public static final String PARAMETER_SHECDULE_TIME = "schedule.time"; /** * system date(yyyyMMddHHmmss) */ public static final String PARAMETER_DATETIME = "system.datetime"; /** * system date(yyyymmdd) today */ public static final String PARAMETER_CURRENT_DATE = "system.biz.curdate"; /** * system date(yyyymmdd) yesterday */ public static final String PARAMETER_BUSINESS_DATE = "system.biz.date"; /** * the absolute path of current executing task */ public static final String PARAMETER_TASK_EXECUTE_PATH = "system.task.execute.path"; /** * the instance id of current task */ public static final String PARAMETER_TASK_INSTANCE_ID = "system.task.instance.id"; /** * month_begin */ public 
static final String MONTH_BEGIN = "month_begin"; /** * add_months */ public static final String ADD_MONTHS = "add_months"; /** * month_end */ public static final String MONTH_END = "month_end"; /** * week_begin */ public static final String WEEK_BEGIN = "week_begin"; /** * week_end */ public static final String WEEK_END = "week_end"; /** * timestamp */ public static final String TIMESTAMP = "timestamp"; public static final char SUBTRACT_CHAR = '-'; public static final char ADD_CHAR = '+'; public static final char MULTIPLY_CHAR = '*'; public static final char DIVISION_CHAR = '/'; public static final char LEFT_BRACE_CHAR = '('; public static final char RIGHT_BRACE_CHAR = ')'; public static final String ADD_STRING = "+"; public static final String MULTIPLY_STRING = "*"; public static final String DIVISION_STRING = "/"; public static final String LEFT_BRACE_STRING = "("; public static final char P = 'P'; public static final char N = 'N'; public static final String SUBTRACT_STRING = "-"; public static final String GLOBAL_PARAMS = "globalParams"; public static final String LOCAL_PARAMS = "localParams"; public static final String LOCAL_PARAMS_LIST = "localParamsList"; public static final String SUBPROCESS_INSTANCE_ID = "subProcessInstanceId"; public static final String PROCESS_INSTANCE_STATE = "processInstanceState"; public static final String PARENT_WORKFLOW_INSTANCE = "parentWorkflowInstance"; public static final String CONDITION_RESULT = "conditionResult"; public static final String SWITCH_RESULT = "switchResult"; public static final String DEPENDENCE = "dependence"; public static final String TASK_TYPE = "taskType"; public static final String TASK_LIST = "taskList"; public static final String QUEUE = "queue"; public static final String QUEUE_NAME = "queueName"; public static final int LOG_QUERY_SKIP_LINE_NUMBER = 0; public static final int LOG_QUERY_LIMIT = 4096; /** * default display rows */ public static final int DEFAULT_DISPLAY_ROWS = 10; /** * jar */ public static final String JAR = "jar"; /** * hadoop */ public static final String HADOOP = "hadoop"; /** * -D <property>=<value> */ public static final String D = "-D"; /** * jdbc url */ public static final String JDBC_MYSQL = "jdbc:mysql://"; public static final String JDBC_POSTGRESQL = "jdbc:postgresql://"; public static final String JDBC_HIVE_2 = "jdbc:hive2://"; public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://"; public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@"; public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//"; public static final String JDBC_SQLSERVER = "jdbc:sqlserver://"; public static final String JDBC_DB2 = "jdbc:db2://"; public static final String JDBC_PRESTO = "jdbc:presto://"; /** * driver */ public static final String ORG_POSTGRESQL_DRIVER = "org.postgresql.Driver"; public static final String COM_MYSQL_JDBC_DRIVER = "com.mysql.jdbc.Driver"; public static final String ORG_APACHE_HIVE_JDBC_HIVE_DRIVER = "org.apache.hive.jdbc.HiveDriver"; public static final String COM_CLICKHOUSE_JDBC_DRIVER = "ru.yandex.clickhouse.ClickHouseDriver"; public static final String COM_ORACLE_JDBC_DRIVER = "oracle.jdbc.driver.OracleDriver"; public static final String COM_SQLSERVER_JDBC_DRIVER = "com.microsoft.sqlserver.jdbc.SQLServerDriver"; public static final String COM_DB2_JDBC_DRIVER = "com.ibm.db2.jcc.DB2Driver"; public static final String COM_PRESTO_JDBC_DRIVER = "com.facebook.presto.jdbc.PrestoDriver"; /** * datasource encryption salt */ public static final String 
DATASOURCE_ENCRYPTION_SALT_DEFAULT = "!@#$%^&*"; public static final String DATASOURCE_ENCRYPTION_ENABLE = "datasource.encryption.enable"; public static final String DATASOURCE_ENCRYPTION_SALT = "datasource.encryption.salt"; /** * resource storage type */ public static final String RESOURCE_STORAGE_TYPE = "resource.storage.type"; /** * kerberos */ public static final String KERBEROS = "kerberos"; /** * kerberos expire time */ public static final String KERBEROS_EXPIRE_TIME = "kerberos.expire.time"; /** * java.security.krb5.conf */ public static final String JAVA_SECURITY_KRB5_CONF = "java.security.krb5.conf"; /** * java.security.krb5.conf.path */ public static final String JAVA_SECURITY_KRB5_CONF_PATH = "java.security.krb5.conf.path"; /** * loginUserFromKeytab user */ public static final String LOGIN_USER_KEY_TAB_USERNAME = "login.user.keytab.username"; /** * loginUserFromKeytab path */ public static final String LOGIN_USER_KEY_TAB_PATH = "login.user.keytab.path"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication"; /** * hadoop.security.authentication */ public static final String HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE = "hadoop.security.authentication.startup.state"; }
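To make the mismatch reported in this issue concrete, here is a small hypothetical illustration (all variable names and values are invented; the real path-building code lives elsewhere in the worker and master modules): the worker composes the log path from integer ids, while the database row stores a path built from codes, so the two strings disagree whenever id and code differ.

```java
public class LogPathMismatchSketch {
    public static void main(String[] args) {
        long definitionId = 79L, instanceId = 4084L, taskInstanceId = 15210L; // worker-side ids
        long definitionCode = 123456789L, instanceCode = 987654321L;          // codes stored in the db row

        String workerPath = String.format("logs/%d/%d/%d.log", definitionId, instanceId, taskInstanceId);
        String dbPath = String.format("/logs/%d/%d/%d.log", definitionCode, instanceCode, taskInstanceId);

        // the UI resolves the db path, but the worker wrote to the other one
        System.out.println(workerPath); // logs/79/4084/15210.log
        System.out.println(dbPath);     // /logs/123456789/987654321/15210.log
    }
}
```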
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,306
[Bug] [Worker] task log path generated error.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The task log path is generated incorrectly. ### What you expected to happen The task log path stored in the database should be the same as the one the worker uses. ### How to reproduce Run a process definition; you will find that the shell task's log path in the database differs from the worker's. worker log-path: logs/definition_id/instance_id/task_instance_id.log db-path: /logs/definition_code/instance_code/task_instance_id.log ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6306
https://github.com/apache/dolphinscheduler/pull/6311
0b53adeb07df72a9b888566092642a77dfccf65d
163a0696a4ee71a44e02054caef9b78387a9e3b2
"2021-09-23T06:09:23Z"
java
"2021-09-23T08:51:58Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/AbstractCommandExecutor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.api; import static org.apache.dolphinscheduler.spi.task.TaskConstants.EXIT_CODE_FAILURE; import static org.apache.dolphinscheduler.spi.task.TaskConstants.EXIT_CODE_KILL; import org.apache.dolphinscheduler.plugin.task.util.LoggerUtils; import org.apache.dolphinscheduler.plugin.task.util.OSUtils; import org.apache.dolphinscheduler.spi.task.TaskConstants; import org.apache.dolphinscheduler.spi.task.TaskExecutionContextCacheManager; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; import org.apache.dolphinscheduler.spi.utils.StringUtils; import java.io.BufferedReader; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; import java.lang.reflect.Field; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.slf4j.Logger; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * abstract command executor */ public abstract class AbstractCommandExecutor { /** * rules for extracting application ID */ protected static final Pattern APPLICATION_REGEX = Pattern.compile(TaskConstants.APPLICATION_REGEX); protected StringBuilder varPool = new StringBuilder(); /** * process */ private Process process; /** * log handler */ protected Consumer<LinkedBlockingQueue<String>> logHandler; /** * logger */ protected Logger logger; /** * log list */ protected LinkedBlockingQueue<String> logBuffer; protected boolean logOutputIsSuccess = false; /* * SHELL result string */ protected String taskResultString; /** * taskRequest */ protected TaskRequest taskRequest; public AbstractCommandExecutor(Consumer<LinkedBlockingQueue<String>> logHandler, TaskRequest taskRequest, Logger logger) { this.logHandler = logHandler; this.taskRequest = taskRequest; this.logger = logger; this.logBuffer = new LinkedBlockingQueue<>(); } public AbstractCommandExecutor(LinkedBlockingQueue<String> logBuffer) { this.logBuffer = logBuffer; } /** * build process * * @param commandFile command file * @throws IOException IO Exception */ private void buildProcess(String commandFile) throws IOException { // setting up user to run commands List<String> command = new LinkedList<>(); //init process builder ProcessBuilder processBuilder = new ProcessBuilder(); // setting up a working directory processBuilder.directory(new 
File(taskRequest.getExecutePath())); // merge error information to standard output stream processBuilder.redirectErrorStream(true); // setting up user to run commands command.add("sudo"); command.add("-u"); command.add(taskRequest.getTenantCode()); command.add(commandInterpreter()); command.addAll(Collections.emptyList()); command.add(commandFile); // setting commands processBuilder.command(command); process = processBuilder.start(); printCommand(command); } public TaskResponse run(String execCommand) throws IOException, InterruptedException { TaskResponse result = new TaskResponse(); int taskInstanceId = taskRequest.getTaskInstanceId(); if (null == TaskExecutionContextCacheManager.getByTaskInstanceId(taskInstanceId)) { result.setExitStatusCode(EXIT_CODE_KILL); return result; } if (StringUtils.isEmpty(execCommand)) { TaskExecutionContextCacheManager.removeByTaskInstanceId(taskInstanceId); return result; } String commandFilePath = buildCommandFilePath(); // create command file if not exists createCommandFileIfNotExists(execCommand, commandFilePath); //build process buildProcess(commandFilePath); // parse process output parseProcessOutput(process); int processId = getProcessId(process); result.setProcessId(processId); // cache processId taskRequest.setProcessId(processId); boolean updateTaskExecutionContextStatus = TaskExecutionContextCacheManager.updateTaskExecutionContext(taskRequest); if (Boolean.FALSE.equals(updateTaskExecutionContextStatus)) { ProcessUtils.kill(taskRequest); result.setExitStatusCode(EXIT_CODE_KILL); return result; } // print process id logger.info("process start, process id is: {}", processId); // if timeout occurs, exit directly long remainTime = getRemainTime(); // waiting for the run to finish boolean status = process.waitFor(remainTime, TimeUnit.SECONDS); // if SHELL task exit if (status) { // set appIds List<String> appIds = getAppIds(taskRequest.getLogPath()); result.setAppIds(String.join(TaskConstants.COMMA, appIds)); // SHELL task state result.setExitStatusCode(process.exitValue()); } else { logger.error("process has failure , exitStatusCode:{}, processExitValue:{}, ready to kill ...", result.getExitStatusCode(), process.exitValue()); ProcessUtils.kill(taskRequest); result.setExitStatusCode(EXIT_CODE_FAILURE); } logger.info("process has exited, execute path:{}, processId:{} ,exitStatusCode:{} ,processWaitForStatus:{} ,processExitValue:{}", taskRequest.getExecutePath(), processId, result.getExitStatusCode(), status, process.exitValue()); return result; } public String getVarPool() { return varPool.toString(); } /** * cancel application * * @throws Exception exception */ public void cancelApplication() throws Exception { if (process == null) { return; } // clear log clear(); int processId = getProcessId(process); logger.info("cancel process: {}", processId); // kill , waiting for completion boolean killed = softKill(processId); if (!killed) { // hard kill hardKill(processId); // destory process.destroy(); process = null; } } /** * soft kill * * @param processId process id * @return process is alive */ private boolean softKill(int processId) { if (processId != 0 && process.isAlive()) { try { // sudo -u user command to run command String cmd = String.format("kill %d", processId); cmd = OSUtils.getSudoCmd(taskRequest.getTenantCode(), cmd); logger.info("soft kill task:{}, process id:{}, cmd:{}", taskRequest.getTaskAppId(), processId, cmd); Runtime.getRuntime().exec(cmd); } catch (IOException e) { logger.info("kill attempt failed", e); } } return process.isAlive(); } 
/** * hard kill * * @param processId process id */ private void hardKill(int processId) { if (processId != 0 && process.isAlive()) { try { String cmd = String.format("kill -9 %d", processId); cmd = OSUtils.getSudoCmd(taskRequest.getTenantCode(), cmd); logger.info("hard kill task:{}, process id:{}, cmd:{}", taskRequest.getTaskAppId(), processId, cmd); Runtime.getRuntime().exec(cmd); } catch (IOException e) { logger.error("kill attempt failed ", e); } } } private void printCommand(List<String> commands) { logger.info("task run command: {}", String.join(" ", commands)); } /** * clear */ private void clear() { LinkedBlockingQueue<String> markerLog = new LinkedBlockingQueue<>(1); markerLog.add(ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER.toString()); if (!logBuffer.isEmpty()) { // log handle logHandler.accept(logBuffer); logBuffer.clear(); } logHandler.accept(markerLog); } /** * get the standard output of the process * * @param process process */ private void parseProcessOutput(Process process) { String threadLoggerInfoName = String.format(LoggerUtils.TASK_LOGGER_THREAD_NAME + "-%s", taskRequest.getTaskAppId()); ExecutorService getOutputLogService = newDaemonSingleThreadExecutor(threadLoggerInfoName + "-" + "getOutputLogService"); getOutputLogService.submit(() -> { try (BufferedReader inReader = new BufferedReader(new InputStreamReader(process.getInputStream()))) { String line; logBuffer.add("welcome to use bigdata scheduling system..."); while ((line = inReader.readLine()) != null) { if (line.startsWith("${setValue(")) { varPool.append(line, "${setValue(".length(), line.length() - 2); varPool.append("$VarPool$"); } else { logBuffer.add(line); taskResultString = line; } } logOutputIsSuccess = true; } catch (Exception e) { logger.error(e.getMessage(), e); logOutputIsSuccess = true; } }); getOutputLogService.shutdown(); ExecutorService parseProcessOutputExecutorService = newDaemonSingleThreadExecutor(threadLoggerInfoName); parseProcessOutputExecutorService.submit(() -> { try { long lastFlushTime = System.currentTimeMillis(); while (logBuffer.size() > 0 || !logOutputIsSuccess) { if (logBuffer.size() > 0) { lastFlushTime = flush(lastFlushTime); } else { Thread.sleep(TaskConstants.DEFAULT_LOG_FLUSH_INTERVAL); } } } catch (Exception e) { Thread.currentThread().interrupt(); logger.error(e.getMessage(), e); } finally { clear(); } }); parseProcessOutputExecutorService.shutdown(); } /** * get app links * * @param logPath log path * @return app id list */ private List<String> getAppIds(String logPath) { List<String> logs = convertFile2List(logPath); List<String> appIds = new ArrayList<>(); /* * analysis log?get submited yarn application id */ for (String log : logs) { String appId = findAppId(log); if (StringUtils.isNotEmpty(appId) && !appIds.contains(appId)) { logger.info("find app id: {}", appId); appIds.add(appId); } } return appIds; } /** * convert file to list * * @param filename file name * @return line list */ private List<String> convertFile2List(String filename) { List<String> lineList = new ArrayList<>(100); File file = new File(filename); if (!file.exists()) { return lineList; } try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8))) { String line; while ((line = br.readLine()) != null) { lineList.add(line); } } catch (Exception e) { logger.error(String.format("read file: %s failed : ", filename), e); } return lineList; } /** * find app id * * @param line line * @return appid */ private String findAppId(String 
line) { Matcher matcher = APPLICATION_REGEX.matcher(line); if (matcher.find()) { return matcher.group(); } return null; } /** * get remain time(s) * * @return remain time */ private long getRemainTime() { long usedTime = (System.currentTimeMillis() - taskRequest.getStartTime().getTime()) / 1000; long remainTime = taskRequest.getTaskTimeout() - usedTime; if (remainTime < 0) { throw new RuntimeException("task execution time out"); } return remainTime; } /** * get process id * * @param process process * @return process id */ private int getProcessId(Process process) { int processId = 0; try { Field f = process.getClass().getDeclaredField(TaskConstants.PID); f.setAccessible(true); processId = f.getInt(process); } catch (Throwable e) { logger.error(e.getMessage(), e); } return processId; } /** * when log buffer siz or flush time reach condition , then flush * * @param lastFlushTime last flush time * @return last flush time */ private long flush(long lastFlushTime) { long now = System.currentTimeMillis(); /* * when log buffer siz or flush time reach condition , then flush */ if (logBuffer.size() >= TaskConstants.DEFAULT_LOG_ROWS_NUM || now - lastFlushTime > TaskConstants.DEFAULT_LOG_FLUSH_INTERVAL) { lastFlushTime = now; logHandler.accept(logBuffer); logBuffer.clear(); } return lastFlushTime; } protected abstract String buildCommandFilePath(); protected abstract void createCommandFileIfNotExists(String execCommand, String commandFile) throws IOException; ExecutorService newDaemonSingleThreadExecutor(String threadName) { ThreadFactory threadFactory = new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat(threadName) .build(); return Executors.newSingleThreadExecutor(threadFactory); } protected abstract String commandInterpreter(); }
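One detail worth calling out in the executor above: `parseProcessOutput` treats any stdout line beginning with `${setValue(` as a variable-pool assignment rather than an ordinary log line. A tiny sketch of just that string slicing (outside the executor, with a made-up line) shows what ends up in `varPool`:

```java
public class VarPoolSliceSketch {
    public static void main(String[] args) {
        StringBuilder varPool = new StringBuilder();
        String line = "${setValue(output=42)}"; // a made-up line a shell task might echo
        if (line.startsWith("${setValue(")) {
            // keep only what sits between "${setValue(" and the trailing ")}"
            varPool.append(line, "${setValue(".length(), line.length() - 2);
            varPool.append("$VarPool$");
        }
        System.out.println(varPool); // output=42$VarPool$
    }
}
```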
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,306
[Bug] [Worker] task log path generated error.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened task log path generated error. ### What you expected to happen task log-path in db is different with worker. ### How to reproduce run a process definition. you will find the shell-task's log-path in db is different with the worker. worker log-path: logs/definition_id/instance_id/task_instance_id.log db-path: /logs/definition_code/instance_code/task_instance_id.log ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6306
https://github.com/apache/dolphinscheduler/pull/6311
0b53adeb07df72a9b888566092642a77dfccf65d
163a0696a4ee71a44e02054caef9b78387a9e3b2
"2021-09-23T06:09:23Z"
java
"2021-09-23T08:51:58Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/api/AbstractTaskExecutor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.api; import static ch.qos.logback.classic.ClassicConstants.FINALIZE_SESSION_MARKER; import org.apache.dolphinscheduler.plugin.task.util.LoggerUtils; import org.apache.dolphinscheduler.spi.task.AbstractTask; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; import java.util.List; import java.util.StringJoiner; import java.util.concurrent.LinkedBlockingQueue; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.Marker; import org.slf4j.MarkerFactory; public abstract class AbstractTaskExecutor extends AbstractTask { public static final Marker FINALIZE_SESSION_MARKER = MarkerFactory.getMarker("FINALIZE_SESSION"); protected Logger logger; /** * constructor * * @param taskRequest taskRequest */ protected AbstractTaskExecutor(TaskRequest taskRequest) { super(taskRequest); logger = LoggerFactory.getLogger(LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX, taskRequest.getProcessDefineId(), taskRequest.getProcessInstanceId(), taskRequest.getTaskInstanceId())); } /** * log handle * * @param logs log list */ public void logHandle(LinkedBlockingQueue<String> logs) { // note that the "new line" is added here to facilitate log parsing if (logs.contains(FINALIZE_SESSION_MARKER.toString())) { logger.info(FINALIZE_SESSION_MARKER, FINALIZE_SESSION_MARKER.toString()); } else { StringJoiner joiner = new StringJoiner("\n\t"); while (!logs.isEmpty()) { joiner.add(logs.poll()); } logger.info(" -> {}", joiner); } } }
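`logHandle` above drains the whole buffer into a single log statement, joining lines with `\n\t`. A small sketch of that drain, using a plain queue and invented lines, shows the resulting shape:

```java
import java.util.StringJoiner;
import java.util.concurrent.LinkedBlockingQueue;

public class LogJoinSketch {
    public static void main(String[] args) {
        LinkedBlockingQueue<String> logs = new LinkedBlockingQueue<>();
        logs.add("line one"); // invented sample lines
        logs.add("line two");

        StringJoiner joiner = new StringJoiner("\n\t");
        while (!logs.isEmpty()) {
            joiner.add(logs.poll());
        }
        // the executor would pass this to logger.info(" -> {}", joiner)
        System.out.println(" -> " + joiner);
    }
}
```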
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,306
[Bug] [Worker] task log path generated error.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened task log path generated error. ### What you expected to happen task log-path in db is different with worker. ### How to reproduce run a process definition. you will find the shell-task's log-path in db is different with the worker. worker log-path: logs/definition_id/instance_id/task_instance_id.log db-path: /logs/definition_code/instance_code/task_instance_id.log ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6306
https://github.com/apache/dolphinscheduler/pull/6311
0b53adeb07df72a9b888566092642a77dfccf65d
163a0696a4ee71a44e02054caef9b78387a9e3b2
"2021-09-23T06:09:23Z"
java
"2021-09-23T08:51:58Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-api/src/main/java/org/apache/dolphinscheduler/plugin/task/util/LoggerUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.util; import java.util.ArrayList; import java.util.List; import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.slf4j.Logger; /** * logger utils */ public class LoggerUtils { private static final String APPLICATION_REGEX_NAME = "application_\\d+_\\d+"; private LoggerUtils() { throw new UnsupportedOperationException("Construct LoggerUtils"); } /** * rules for extracting application ID */ private static final Pattern APPLICATION_REGEX = Pattern.compile(APPLICATION_REGEX_NAME); /** * Task Logger's prefix */ public static final String TASK_LOGGER_INFO_PREFIX = "TASK"; /** * Task Logger Thread's name */ public static final String TASK_LOGGER_THREAD_NAME = "TaskLogInfo"; /** * Task Logger Thread's name */ public static final String TASK_APPID_LOG_FORMAT = "[taskAppId="; /** * build job id * * @param affix Task Logger's prefix * @param processDefId process define id * @param processInstId process instance id * @param taskId task id * @return task id format */ public static String buildTaskId(String affix, int processDefId, int processInstId, int taskId) { // - [taskAppId=TASK_79_4084_15210] return String.format(" - %s%s-%s-%s-%s]", TASK_APPID_LOG_FORMAT, affix, processDefId, processInstId, taskId); } /** * processing log * get yarn application id list * * @param log log content * @param logger logger * @return app id list */ public static List<String> getAppIds(String log, Logger logger) { List<String> appIds = new ArrayList<>(); Matcher matcher = APPLICATION_REGEX.matcher(log); // analyse logs to get all submit yarn application id while (matcher.find()) { String appId = matcher.group(); if (!appIds.contains(appId)) { logger.info("find app id: {}", appId); appIds.add(appId); } } return appIds; } public static void logError(Optional<Logger> optionalLogger , String error) { optionalLogger.ifPresent((Logger logger) -> logger.error(error)); } public static void logError(Optional<Logger> optionalLogger , Throwable e) { optionalLogger.ifPresent((Logger logger) -> logger.error(e.getMessage(), e)); } public static void logError(Optional<Logger> optionalLogger , String error, Throwable e) { optionalLogger.ifPresent((Logger logger) -> logger.error(error, e)); } public static void logInfo(Optional<Logger> optionalLogger , String info) { optionalLogger.ifPresent((Logger logger) -> logger.info(info)); } }
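The `getAppIds` helper above scans a log for the yarn application-id pattern `application_\d+_\d+`. A quick sketch of the same matching and de-duplication on a fabricated log line:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class AppIdScanSketch {
    public static void main(String[] args) {
        Pattern appIdPattern = Pattern.compile("application_\\d+_\\d+");
        String log = "submitted application_1632000000000_0007 to ResourceManager"; // fabricated line

        List<String> appIds = new ArrayList<>();
        Matcher matcher = appIdPattern.matcher(log);
        while (matcher.find()) {
            String appId = matcher.group();
            if (!appIds.contains(appId)) { // de-duplicate, as LoggerUtils.getAppIds does
                appIds.add(appId);
            }
        }
        System.out.println(appIds); // [application_1632000000000_0007]
    }
}
```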
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,292
[Bug] [Task] Spark task execute failed due to NPE
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened branch: dev When executing a Spark task, a NullPointerException occurs because `mainJar` has no `resourceName` field. The master only saves the `resourceId` of `mainJar` in the database; when the worker fetches the task data from the database, it does not query the resource information, so the `mainJar` field of `SparkParameters` looks like this: ```json {"id": 1} ``` `res` and `resourceName` are lost and become `null` in the object, which causes an NPE when the `mainJar.getResourceName()` method is executed. ### What you expected to happen The Spark task executes successfully. ### How to reproduce Create a new Spark task and run it. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6292
https://github.com/apache/dolphinscheduler/pull/6295
35312fd498bf5f2059b0e77b68f557c388870310
c4375a54c167f8e96d80c3cc67ad0a607ba2ef7b
"2021-09-22T12:03:42Z"
java
"2021-09-23T14:23:17Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.dolphinscheduler.common.process;

/**
 * resource info
 */
public class ResourceInfo {

    /**
     * res the name of the resource that was uploaded
     */
    private int id;

    public int getId() {
        return id;
    }

    public void setId(int id) {
        this.id = id;
    }

    private String res;

    public String getRes() {
        return res;
    }

    public void setRes(String res) {
        this.res = res;
    }
}
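The class above is the whole payload the worker receives for `mainJar` before the fix: only `id` survives persistence. A hedged sketch of the failure mode described in issue #6292 (Jackson is assumed here purely for illustration; the project routes this through its own `JSONUtils`):

```java
import com.fasterxml.jackson.databind.ObjectMapper;

public class MainJarNpeSketch {
    public static void main(String[] args) throws Exception {
        // this is all the master persisted for the jar, per the issue report
        String json = "{\"id\": 1}";
        ObjectMapper mapper = new ObjectMapper();
        ResourceInfo mainJar = mapper.readValue(json, ResourceInfo.class);

        System.out.println(mainJar.getId());  // 1
        System.out.println(mainJar.getRes()); // null -- any getResourceName()-style lookup NPEs downstream
    }
}
```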
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,292
[Bug] [Task] Spark task execute failed due to NPE
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened branch: dev When executing a Spark task, a NullPointerException occurs because `mainJar` has no `resourceName` field. The master only saves the `resourceId` of `mainJar` in the database; when the worker fetches the task data from the database, it does not query the resource information, so the `mainJar` field of `SparkParameters` looks like this: ```json {"id": 1} ``` `res` and `resourceName` are lost and become `null` in the object, which causes an NPE when the `mainJar.getResourceName()` method is executed. ### What you expected to happen The Spark task executes successfully. ### How to reproduce Create a new Spark task and run it. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6292
https://github.com/apache/dolphinscheduler/pull/6295
35312fd498bf5f2059b0e77b68f557c388870310
c4375a54c167f8e96d80c3cc67ad0a607ba2ef7b
"2021-09-22T12:03:42Z"
java
"2021-09-23T14:23:17Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID; import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS; import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS; import static java.util.stream.Collectors.toSet; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.DateInterval; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.process.ResourceInfo; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import 
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.DagData;
import org.apache.dolphinscheduler.dao.entity.DataSource;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ErrorCommand;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation;
import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.CommandMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper;
import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceMapper;
import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper;
import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.TenantMapper;
import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper;
import org.apache.dolphinscheduler.dao.mapper.UserMapper;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.service.log.LogClientService;

import org.apache.commons.lang.StringUtils;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.stream.Collectors;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;

import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.facebook.presto.jdbc.internal.guava.collect.Lists;
import com.fasterxml.jackson.databind.node.ObjectNode;

/**
 * process related dao: wraps the mappers used to operate on commands, process instances and task instances.
 */
@Component
public class ProcessService {

    private final Logger logger = LoggerFactory.getLogger(getClass());

    private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(),
            ExecutionStatus.RUNNING_EXECUTION.ordinal(),
            ExecutionStatus.DELAY_EXECUTION.ordinal(),
            ExecutionStatus.READY_PAUSE.ordinal(),
            ExecutionStatus.READY_STOP.ordinal()};

    @Autowired
    private UserMapper userMapper;

    @Autowired
    private ProcessDefinitionMapper processDefineMapper;

    @Autowired
    private ProcessDefinitionLogMapper processDefineLogMapper;

    @Autowired
    private ProcessInstanceMapper processInstanceMapper;

    @Autowired
    private DataSourceMapper dataSourceMapper;

    @Autowired
    private ProcessInstanceMapMapper processInstanceMapMapper;

    @Autowired
    private TaskInstanceMapper taskInstanceMapper;

    @Autowired
    private CommandMapper commandMapper;

    @Autowired
    private ScheduleMapper scheduleMapper;

    @Autowired
    private UdfFuncMapper udfFuncMapper;

    @Autowired
    private ResourceMapper resourceMapper;

    @Autowired
    private ResourceUserMapper resourceUserMapper;

    @Autowired
    private ErrorCommandMapper errorCommandMapper;

    @Autowired
    private TenantMapper tenantMapper;

    @Autowired
    private ProjectMapper projectMapper;

    @Autowired
    private TaskDefinitionMapper taskDefinitionMapper;

    @Autowired
    private TaskDefinitionLogMapper taskDefinitionLogMapper;

    @Autowired
    private ProcessTaskRelationMapper processTaskRelationMapper;

    @Autowired
    private ProcessTaskRelationLogMapper processTaskRelationLogMapper;

    @Autowired
    private EnvironmentMapper environmentMapper;

    /**
     * handle Command (construct ProcessInstance from Command), wrapped in transaction
     *
     * @param logger logger
     * @param host host
     * @param validThreadNum validThreadNum
     * @param command found command
     * @return process instance
     */
    @Transactional(rollbackFor = Exception.class)
    public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) {
        ProcessInstance processInstance = constructProcessInstance(command, host);
        // cannot construct process instance, return null
        if (processInstance == null) {
            logger.error("scan command, command parameter is error: {}", command);
            moveToErrorCommand(command, "process instance is null");
            return null;
        }
        if (!checkThreadNum(command, validThreadNum)) {
            logger.info("there are not enough threads for this command: {}", command);
            return setWaitingThreadProcess(command, processInstance);
        }
        processInstance.setCommandType(command.getCommandType());
        processInstance.addHistoryCmd(command.getCommandType());
        saveProcessInstance(processInstance);
        this.setSubProcessParam(processInstance);
        this.commandMapper.deleteById(command.getId());
        return processInstance;
    }

    /**
     * save error command, and delete original command
     *
     * @param command command
     * @param message message
     */
    @Transactional(rollbackFor = Exception.class)
    public void moveToErrorCommand(Command command, String message) {
        ErrorCommand errorCommand = new ErrorCommand(command, message);
        this.errorCommandMapper.insert(errorCommand);
        this.commandMapper.deleteById(command.getId());
    }

    /**
     * set process waiting thread
     *
     * @param command command
     * @param processInstance processInstance
     * @return process instance
     */
    private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) {
        processInstance.setState(ExecutionStatus.WAITING_THREAD);
        if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) {
            processInstance.addHistoryCmd(command.getCommandType());
        }
        saveProcessInstance(processInstance);
        this.setSubProcessParam(processInstance);
        createRecoveryWaitingThreadCommand(command, processInstance);
        return null;
    }

    /**
     * check thread num
     *
     * @param command command
     * @param validThreadNum validThreadNum
     * @return if thread is enough
     */
    private boolean checkThreadNum(Command command, int validThreadNum) {
        int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode());
        return validThreadNum >= commandThreadCount;
    }

    /**
     * insert one command
     *
     * @param command command
     * @return create result
     */
    public int createCommand(Command command) {
        int result = 0;
        if (command != null) {
            result = commandMapper.insert(command);
        }
        return result;
    }

    /**
     * find one command from queue list
     *
     * @return command
     */
    public Command findOneCommand() {
        return commandMapper.getOneToRun();
    }

    /**
     * get command page
     *
     * @param pageSize
     * @param pageNumber
     * @return
     */
    public List<Command> findCommandPage(int pageSize, int pageNumber) {
        Page<Command> commandPage = new Page<>(pageNumber, pageSize);
        return commandMapper.queryCommandPage(commandPage).getRecords();
    }

    /**
     * check whether the input command already exists in the command list
     *
     * @param command command
     * @return create command result
     */
    public boolean verifyIsNeedCreateCommand(Command command) {
        boolean isNeedCreate = true;
        EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class);
        cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1);
        cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1);
        cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1);
        CommandType commandType = command.getCommandType();
        if (cmdTypeMap.containsKey(commandType)) {
            ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam());
            int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt();
            List<Command> commands = commandMapper.selectList(null);
            // for all commands
            for (Command tmpCommand : commands) {
                if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) {
                    ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam());
                    if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) {
                        isNeedCreate = false;
                        break;
                    }
                }
            }
        }
        return isNeedCreate;
    }

    /**
     * find process instance detail by id
     *
     * @param processId processId
     * @return process instance
     */
    public ProcessInstance findProcessInstanceDetailById(int processId) {
        return processInstanceMapper.queryDetailById(processId);
    }

    /**
     * get task node list by definitionId
     */
    public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) {
        ProcessDefinition processDefinition = processDefineMapper.selectById(defineId);
        if (processDefinition == null) {
            logger.error("process define not exists");
            return new ArrayList<>();
        }
        List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion());
        Set<TaskDefinition> taskDefinitionSet = new HashSet<>();
        for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) {
            if (processTaskRelation.getPostTaskCode() > 0) {
                taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion()));
            }
        }
        List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet);
        return new ArrayList<>(taskDefinitionLogs);
    }
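    // Note: task nodes are resolved from the definition snapshot, not the live tables:
    // relations come from process_task_relation_log at the definition's (code, version),
    // and each post-task is then looked up in the task definition log by
    // (postTaskCode, postTaskVersion), so the returned list matches the version that ran.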
    /**
     * find process instance by id
     *
     * @param processId processId
     * @return process instance
     */
    public ProcessInstance findProcessInstanceById(int processId) {
        return processInstanceMapper.selectById(processId);
    }

    /**
     * find process define by id.
     *
     * @param processDefinitionId processDefinitionId
     * @return process definition
     */
    public ProcessDefinition findProcessDefineById(int processDefinitionId) {
        return processDefineMapper.selectById(processDefinitionId);
    }

    /**
     * find process define by code and version.
     *
     * @param processDefinitionCode processDefinitionCode
     * @return process definition
     */
    public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) {
        ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
        if (processDefinition == null || processDefinition.getVersion() != version) {
            processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version);
            if (processDefinition != null) {
                processDefinition.setId(0);
            }
        }
        return processDefinition;
    }

    /**
     * find process define by code.
     *
     * @param processDefinitionCode processDefinitionCode
     * @return process definition
     */
    public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) {
        return processDefineMapper.queryByCode(processDefinitionCode);
    }

    /**
     * delete work process instance by id
     *
     * @param processInstanceId processInstanceId
     * @return delete process instance result
     */
    public int deleteWorkProcessInstanceById(int processInstanceId) {
        return processInstanceMapper.deleteById(processInstanceId);
    }

    /**
     * delete all sub process by parent instance id
     *
     * @param processInstanceId processInstanceId
     * @return delete all sub process instance result
     */
    public int deleteAllSubWorkProcessByParentId(int processInstanceId) {
        List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId);
        for (Integer subId : subProcessIdList) {
            deleteAllSubWorkProcessByParentId(subId);
            deleteWorkProcessMapByParentId(subId);
            removeTaskLogFile(subId);
            deleteWorkProcessInstanceById(subId);
        }
        return 1;
    }

    /**
     * remove task log file
     *
     * @param processInstanceId processInstanceId
     */
    public void removeTaskLogFile(Integer processInstanceId) {
        List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId);
        if (CollectionUtils.isEmpty(taskInstanceList)) {
            return;
        }
        try (LogClientService logClient = new LogClientService()) {
            for (TaskInstance taskInstance : taskInstanceList) {
                String taskLogPath = taskInstance.getLogPath();
                if (StringUtils.isEmpty(taskInstance.getHost())) {
                    continue;
                }
                int port = Constants.RPC_PORT;
                String ip = "";
                try {
                    ip = Host.of(taskInstance.getHost()).getIp();
                } catch (Exception e) {
                    // compatible old version
                    ip = taskInstance.getHost();
                }
                // remove task log from logger server
                logClient.removeTaskLog(ip, port, taskLogPath);
            }
        }
    }

    /**
     * calculate sub process number in the process define.
     *
     * @param processDefinitionCode processDefinitionCode
     * @return process thread num count
     */
    private Integer workProcessThreadNumCount(long processDefinitionCode) {
        ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode);
        List<Integer> ids = new ArrayList<>();
        recurseFindSubProcessId(processDefinition.getId(), ids);
        return ids.size() + 1;
    }
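    // Note: the value returned above is the number of process instances a command may
    // occupy: 1 for the main process plus one per (recursively nested) sub-process.
    // checkThreadNum() compares it with the free thread count, so, for illustration,
    // a definition containing two nested sub-processes needs at least 3 free threads.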
    /**
     * recursive query sub process definition id by parent id.
     *
     * @param parentId parentId
     * @param ids ids
     */
    public void recurseFindSubProcessId(int parentId, List<Integer> ids) {
        List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId);
        if (taskNodeList != null && !taskNodeList.isEmpty()) {
            for (TaskDefinition taskNode : taskNodeList) {
                String parameter = taskNode.getTaskParams();
                ObjectNode parameterJson = JSONUtils.parseObject(parameter);
                if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) {
                    SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class);
                    ids.add(subProcessParam.getProcessDefinitionId());
                    recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids);
                }
            }
        }
    }

    /**
     * create recovery waiting thread command when thread pool is not enough for the process instance.
     * sub work process instance need not to create recovery command.
     * create recovery waiting thread command and delete origin command at the same time.
     * if the recovery command already exists, only update the field update_time
     *
     * @param originCommand originCommand
     * @param processInstance processInstance
     */
    public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) {
        // sub process does not need to create a wait command
        if (processInstance.getIsSubProcess() == Flag.YES) {
            if (originCommand != null) {
                commandMapper.deleteById(originCommand.getId());
            }
            return;
        }
        Map<String, String> cmdParam = new HashMap<>();
        cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId()));
        // process instance quit with "waiting thread" state
        if (originCommand == null) {
            Command command = new Command(
                    CommandType.RECOVER_WAITING_THREAD,
                    processInstance.getTaskDependType(),
                    processInstance.getFailureStrategy(),
                    processInstance.getExecutorId(),
                    processInstance.getProcessDefinition().getCode(),
                    JSONUtils.toJsonString(cmdParam),
                    processInstance.getWarningType(),
                    processInstance.getWarningGroupId(),
                    processInstance.getScheduleTime(),
                    processInstance.getWorkerGroup(),
                    processInstance.getEnvironmentCode(),
                    processInstance.getProcessInstancePriority()
            );
            saveCommand(command);
            return;
        }
        // update the command time if the current command is a recover-from-waiting command
        if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) {
            originCommand.setUpdateTime(new Date());
            saveCommand(originCommand);
        } else {
            // delete old command and create new waiting thread command
            commandMapper.deleteById(originCommand.getId());
            originCommand.setId(0);
            originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD);
            originCommand.setUpdateTime(new Date());
            originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam));
            originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority());
            saveCommand(originCommand);
        }
    }

    /**
     * get schedule time from command
     *
     * @param command command
     * @param cmdParam cmdParam map
     * @return date
     */
    private Date getScheduleTime(Command command, Map<String, String> cmdParam) {
        Date scheduleTime = command.getScheduleTime();
        if (scheduleTime == null && cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) {
            scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE));
        }
        return scheduleTime;
    }
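    // Note: for complement (backfill) commands the schedule time may be carried in the
    // command parameters rather than on the command itself; the key is
    // CMDPARAM_COMPLEMENT_DATA_START_DATE and the value is expected in
    // yyyy-MM-dd HH:mm:ss form (see YYYY_MM_DD_HH_MM_SS), e.g. "2021-01-01 00:00:00".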
    /**
     * generate a new work process instance from command.
     *
     * @param processDefinition processDefinition
     * @param command command
     * @param cmdParam cmdParam map
     * @return process instance
     */
    private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition,
                                                       Command command,
                                                       Map<String, String> cmdParam) {
        ProcessInstance processInstance = new ProcessInstance(processDefinition);
        processInstance.setProcessDefinitionCode(processDefinition.getCode());
        processInstance.setProcessDefinitionVersion(processDefinition.getVersion());
        processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
        processInstance.setRecovery(Flag.NO);
        processInstance.setStartTime(new Date());
        processInstance.setRunTimes(1);
        processInstance.setMaxTryTimes(0);
        //processInstance.setProcessDefinitionId(command.getProcessDefinitionId());
        processInstance.setCommandParam(command.getCommandParam());
        processInstance.setCommandType(command.getCommandType());
        processInstance.setIsSubProcess(Flag.NO);
        processInstance.setTaskDependType(command.getTaskDependType());
        processInstance.setFailureStrategy(command.getFailureStrategy());
        processInstance.setExecutorId(command.getExecutorId());
        WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType();
        processInstance.setWarningType(warningType);
        Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId();
        processInstance.setWarningGroupId(warningGroupId);

        // schedule time
        Date scheduleTime = getScheduleTime(command, cmdParam);
        if (scheduleTime != null) {
            processInstance.setScheduleTime(scheduleTime);
        }
        processInstance.setCommandStartTime(command.getStartTime());
        processInstance.setLocations(processDefinition.getLocations());

        // reset global params while there are start parameters
        setGlobalParamIfCommanded(processDefinition, cmdParam);

        // curing global params
        processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
                processDefinition.getGlobalParamMap(),
                processDefinition.getGlobalParamList(),
                getCommandTypeIfComplement(processInstance, command),
                processInstance.getScheduleTime()));

        // set process instance priority
        processInstance.setProcessInstancePriority(command.getProcessInstancePriority());
        String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup();
        processInstance.setWorkerGroup(workerGroup);
        processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode())
                ? -1 : command.getEnvironmentCode());
        processInstance.setTimeout(processDefinition.getTimeout());
        processInstance.setTenantId(processDefinition.getTenantId());
        return processInstance;
    }

    private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) {
        // get start params from command param
        Map<String, String> startParamMap = new HashMap<>();
        if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) {
            String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS);
            startParamMap = JSONUtils.toMap(startParamJson);
        }
        Map<String, String> fatherParamMap = new HashMap<>();
        if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) {
            String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS);
            fatherParamMap = JSONUtils.toMap(fatherParamJson);
        }
        startParamMap.putAll(fatherParamMap);
        // set start param into global params
        if (startParamMap.size() > 0 && processDefinition.getGlobalParamMap() != null) {
            for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) {
                String val = startParamMap.get(param.getKey());
                if (val != null) {
                    param.setValue(val);
                }
            }
        }
    }

    /**
     * get process tenant
     * if there is a tenant id in the definition, use the tenant of the definition.
     * if there is no tenant id in the definition or the tenant does not exist,
     * use the definition creator's tenant.
     *
     * @param tenantId tenantId
     * @param userId userId
     * @return tenant
     */
    public Tenant getTenantForProcess(int tenantId, int userId) {
        Tenant tenant = null;
        if (tenantId >= 0) {
            tenant = tenantMapper.queryById(tenantId);
        }
        if (userId == 0) {
            return null;
        }
        if (tenant == null) {
            User user = userMapper.selectById(userId);
            tenant = tenantMapper.queryById(user.getTenantId());
        }
        return tenant;
    }

    /**
     * get an environment
     * use the code of the environment to find an environment.
     *
     * @param environmentCode environmentCode
     * @return Environment
     */
    public Environment findEnvironmentByCode(Long environmentCode) {
        Environment environment = null;
        if (environmentCode >= 0) {
            environment = environmentMapper.queryByEnvironmentCode(environmentCode);
        }
        return environment;
    }

    /**
     * check command parameters is valid
     *
     * @param command command
     * @param cmdParam cmdParam map
     * @return whether command param is valid
     */
    private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) {
        if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) {
            if (cmdParam == null
                    || !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES)
                    || cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) {
                logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType());
                return false;
            }
        }
        return true;
    }

    /**
     * construct process instance according to one command.
     *
     * @param command command
     * @param host host
     * @return process instance
     */
    private ProcessInstance constructProcessInstance(Command command, String host) {
        ProcessInstance processInstance;
        CommandType commandType = command.getCommandType();
        Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam());
        ProcessDefinition processDefinition = getProcessDefinitionByCommand(command.getProcessDefinitionCode(), cmdParam);
        if (processDefinition == null) {
            logger.error("cannot find the work process define! define code : {}", command.getProcessDefinitionCode());
define code : {}", command.getProcessDefinitionCode()); return null; } if (cmdParam != null) { int processInstanceId = 0; // recover from failure or pause tasks if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING); processInstanceId = Integer.parseInt(processId); if (processInstanceId == 0) { logger.error("command parameter is error, [ ProcessInstanceId ] is 0"); return null; } } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { // sub process map String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS); processInstanceId = Integer.parseInt(pId); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { // waiting thread command String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD); processInstanceId = Integer.parseInt(pId); } if (processInstanceId == 0) { processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } else { processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return processInstance; } CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command); // reset global params while repeat running is needed by cmdParam if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) { setGlobalParamIfCommanded(processDefinition, cmdParam); } // Recalculate global parameters after rerun. processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), commandTypeIfComplement, processInstance.getScheduleTime())); processInstance.setProcessDefinition(processDefinition); } //reset command parameter if (processInstance.getCommandParam() != null) { Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam()); for (Map.Entry<String, String> entry : processCmdParam.entrySet()) { if (!cmdParam.containsKey(entry.getKey())) { cmdParam.put(entry.getKey(), entry.getValue()); } } } // reset command parameter if sub process if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstance.setCommandParam(command.getCommandParam()); } } else { // generate one new process instance processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) { logger.error("command parameter check failed!"); return null; } if (command.getScheduleTime() != null) { processInstance.setScheduleTime(command.getScheduleTime()); } processInstance.setHost(host); ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION; int runTime = processInstance.getRunTimes(); switch (commandType) { case START_PROCESS: break; case START_FAILURE_TASK_PROCESS: // find failed tasks and init these tasks List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE); List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE); List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); failedList.addAll(killedList); failedList.addAll(toleranceList); for (Integer taskId : failedList) { initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(Constants.COMMA, convertIntListToString(failedList))); 
                processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
                processInstance.setRunTimes(runTime + 1);
                break;
            case START_CURRENT_TASK_PROCESS:
                break;
            case RECOVER_WAITING_THREAD:
                break;
            case RECOVER_SUSPENDED_PROCESS:
                // find pause tasks and init task's state
                cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
                List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE);
                List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL);
                suspendedNodeList.addAll(stopNodeList);
                for (Integer taskId : suspendedNodeList) {
                    // initialize the pause state
                    initTaskInstance(this.findTaskInstanceById(taskId));
                }
                cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList)));
                processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
                processInstance.setRunTimes(runTime + 1);
                break;
            case RECOVER_TOLERANCE_FAULT_PROCESS:
                // recover tolerance fault process
                processInstance.setRecovery(Flag.YES);
                runStatus = processInstance.getState();
                break;
            case COMPLEMENT_DATA:
                // delete all the valid tasks when complement data
                List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId());
                for (TaskInstance taskInstance : taskInstanceList) {
                    taskInstance.setFlag(Flag.NO);
                    this.updateTaskInstance(taskInstance);
                }
                initComplementDataParam(processDefinition, processInstance, cmdParam);
                break;
            case REPEAT_RUNNING:
                // delete the recover task names from command parameter
                if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
                    cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
                    processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
                }
                // delete all the valid tasks when repeat running
                List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId());
                for (TaskInstance taskInstance : validTaskList) {
                    taskInstance.setFlag(Flag.NO);
                    updateTaskInstance(taskInstance);
                }
                processInstance.setStartTime(new Date());
                processInstance.setEndTime(null);
                processInstance.setRunTimes(runTime + 1);
                initComplementDataParam(processDefinition, processInstance, cmdParam);
                break;
            case SCHEDULER:
                break;
            default:
                break;
        }
        processInstance.setState(runStatus);
        return processInstance;
    }

    /**
     * get process definition by command
     * If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance
     * Otherwise, get the latest version of ProcessDefinition
     *
     * @return ProcessDefinition
     */
    private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) {
        if (cmdParam != null) {
            int processInstanceId = 0;
            if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) {
                processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING));
            } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) {
                processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS));
            } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) {
                processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD));
            }
            if (processInstanceId != 0) {
                ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId);
                if (processInstance == null) {
                    return null;
                }
                return processDefineLogMapper.queryByDefinitionCodeAndVersion(
                        processInstance.getProcessDefinitionCode(),
                        processInstance.getProcessDefinitionVersion());
            }
        }
        return processDefineMapper.queryByCode(processDefinitionCode);
    }

    /**
     * return complement data if the process starts with complement data
     *
     * @param processInstance processInstance
     * @param command command
     * @return command type
     */
    private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) {
        if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) {
            return CommandType.COMPLEMENT_DATA;
        } else {
            return command.getCommandType();
        }
    }

    /**
     * initialize complement data parameters
     *
     * @param processDefinition processDefinition
     * @param processInstance processInstance
     * @param cmdParam cmdParam
     */
    private void initComplementDataParam(ProcessDefinition processDefinition,
                                         ProcessInstance processInstance,
                                         Map<String, String> cmdParam) {
        if (!processInstance.isComplementData()) {
            return;
        }
        Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE), YYYY_MM_DD_HH_MM_SS);
        if (Flag.NO == processInstance.getIsSubProcess()) {
            processInstance.setScheduleTime(startComplementTime);
        }
        processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
                processDefinition.getGlobalParamMap(),
                processDefinition.getGlobalParamList(),
                CommandType.COMPLEMENT_DATA,
                processInstance.getScheduleTime()));
    }

    /**
     * set sub work process parameters.
     * handle sub work process instance, update relation table and command parameters
     * set sub work process flag, extends parent work process command parameters
     *
     * @param subProcessInstance subProcessInstance
     */
    public void setSubProcessParam(ProcessInstance subProcessInstance) {
        String cmdParam = subProcessInstance.getCommandParam();
        if (StringUtils.isEmpty(cmdParam)) {
            return;
        }
        Map<String, String> paramMap = JSONUtils.toMap(cmdParam);
        // write sub process id into cmd param.
        if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS)
                && CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) {
            paramMap.remove(CMD_PARAM_SUB_PROCESS);
            paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId()));
            subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap));
            subProcessInstance.setIsSubProcess(Flag.YES);
            this.saveProcessInstance(subProcessInstance);
        }
        // copy parent instance user def params to sub process.
        String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID);
        if (StringUtils.isNotEmpty(parentInstanceId)) {
            ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId));
            if (parentInstance != null) {
                subProcessInstance.setGlobalParams(
                        joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams()));
                this.saveProcessInstance(subProcessInstance);
            } else {
                logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam);
            }
        }
        ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class);
        if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) {
            return;
        }
        // update sub process id to process map table
        processInstanceMap.setProcessInstanceId(subProcessInstance.getId());
        this.updateWorkProcessInstanceMap(processInstanceMap);
    }
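    // Note on parameter inheritance: the parent's global params are merged into the
    // sub-process instance by joinGlobalParams() below, and a key already defined by
    // the sub-process always wins over the parent's value for the same key.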
    /**
     * join parent global params into sub process.
     * only keys that are not already in the sub process global params are joined.
     *
     * @param parentGlobalParams parentGlobalParams
     * @param subGlobalParams subGlobalParams
     * @return global params join
     */
    private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) {
        List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class);
        List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class);
        Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
        for (Property parent : parentPropertyList) {
            if (!subMap.containsKey(parent.getProp())) {
                subPropertyList.add(parent);
            }
        }
        return JSONUtils.toJsonString(subPropertyList);
    }

    /**
     * initialize task instance
     *
     * @param taskInstance taskInstance
     */
    private void initTaskInstance(TaskInstance taskInstance) {
        if (!taskInstance.isSubProcess()
                && (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) {
            taskInstance.setFlag(Flag.NO);
            updateTaskInstance(taskInstance);
            return;
        }
        taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
        updateTaskInstance(taskInstance);
    }

    /**
     * retry submit task to db
     *
     * @param taskInstance
     * @param commitRetryTimes
     * @param commitInterval
     * @return
     */
    public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) {
        int retryTimes = 1;
        boolean submitDB = false;
        TaskInstance task = null;
        while (retryTimes <= commitRetryTimes) {
            try {
                if (!submitDB) {
                    // submit task to db
                    task = submitTask(taskInstance);
                    if (task != null && task.getId() != 0) {
                        submitDB = true;
                        break;
                    }
                }
                if (!submitDB) {
                    logger.error("task commit to db failed, taskId {} has already retried {} times, please check the database", taskInstance.getId(), retryTimes);
                }
                Thread.sleep(commitInterval);
            } catch (Exception e) {
                logger.error("task commit to mysql failed", e);
            }
            retryTimes += 1;
        }
        return task;
    }

    /**
     * submit task to db
     * submit sub process to command
     *
     * @param taskInstance taskInstance
     * @return task instance
     */
    @Transactional(rollbackFor = Exception.class)
    public TaskInstance submitTask(TaskInstance taskInstance) {
        ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
        logger.info("start submit task : {}, instance id:{}, state: {}",
                taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState());
        //submit to db
        TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance);
        if (task == null) {
            logger.error("end submit task to db error, task name:{}, process id:{} state: {} ",
                    taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState());
            return task;
        }
        if (!task.getState().typeIsFinished()) {
            createSubWorkProcess(processInstance, task);
        }
        logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ",
                taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState());
        return task;
    }

    /**
     * set work process instance map;
     * note: repeat running does not generate a new sub process instance
     * set map {parent instance id, task instance id, 0(child instance id)}
     *
     * @param parentInstance parentInstance
     * @param parentTask parentTask
     * @return process instance map
     */
    private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) {
        ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId());
        if (processMap != null) {
            return processMap;
        }
        if (parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) {
            // update current task id to map
            processMap = findPreviousTaskProcessMap(parentInstance, parentTask);
            if (processMap != null) {
                processMap.setParentTaskInstanceId(parentTask.getId());
                updateWorkProcessInstanceMap(processMap);
                return processMap;
            }
        }
        // new task
        processMap = new ProcessInstanceMap();
        processMap.setParentProcessInstanceId(parentInstance.getId());
        processMap.setParentTaskInstanceId(parentTask.getId());
        createWorkProcessInstanceMap(processMap);
        return processMap;
    }

    /**
     * find previous task work process map.
     *
     * @param parentProcessInstance parentProcessInstance
     * @param parentTask parentTask
     * @return process instance map
     */
    private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance,
                                                          TaskInstance parentTask) {
        Integer preTaskId = 0;
        List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId());
        for (TaskInstance task : preTaskList) {
            if (task.getName().equals(parentTask.getName())) {
                preTaskId = task.getId();
                ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId);
                if (map != null) {
                    return map;
                }
            }
        }
        logger.info("sub process instance is not found,parent task:{},parent instance:{}",
                parentTask.getId(), parentProcessInstance.getId());
        return null;
    }

    /**
     * create sub work process command
     *
     * @param parentProcessInstance parentProcessInstance
     * @param task task
     */
    public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) {
        if (!task.isSubProcess()) {
            return;
        }
        // check whether the sub work flow was already created
        ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId());
        if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) {
            // fault-tolerance recovery does not create a new command when the sub-process command has already been created
            return;
        }
        instanceMap = setProcessInstanceMap(parentProcessInstance, task);
        ProcessInstance childInstance = null;
        if (instanceMap.getProcessInstanceId() != 0) {
            childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId());
        }
        Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task);
        updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode());
        initSubInstanceState(childInstance);
        createCommand(subProcessCommand);
        logger.info("sub process command created: {} ", subProcessCommand);
    }
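    // Note: creating a sub work process is a three-step handshake: a row in the
    // process instance map links (parent instance, parent task, child instance),
    // the child command type is derived from the parent (see getSubCommandType()),
    // and for complement runs the parent's start/end dates are copied into the
    // child command so the child backfills the same window.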
    /**
     * complement data needs to transform parent parameters to the child.
     */
    private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) {
        // set sub work process command
        String processMapStr = JSONUtils.toJsonString(instanceMap);
        Map<String, String> cmdParam = JSONUtils.toMap(processMapStr);
        if (parentProcessInstance.isComplementData()) {
            Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam());
            String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE);
            String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE);
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime);
            cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime);
            processMapStr = JSONUtils.toJsonString(cmdParam);
        }
        if (fatherParams.size() != 0) {
            cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams));
            processMapStr = JSONUtils.toJsonString(cmdParam);
        }
        return processMapStr;
    }

    public Map<String, String> getGlobalParamMap(String globalParams) {
        List<Property> propList;
        Map<String, String> globalParamMap = new HashMap<>();
        if (StringUtils.isNotEmpty(globalParams)) {
            propList = JSONUtils.toList(globalParams, Property.class);
            globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue));
        }
        return globalParamMap;
    }

    /**
     * create sub work process command
     */
    public Command createSubProcessCommand(ProcessInstance parentProcessInstance,
                                           ProcessInstance childInstance,
                                           ProcessInstanceMap instanceMap,
                                           TaskInstance task) {
        CommandType commandType = getSubCommandType(parentProcessInstance, childInstance);
        Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams());
        int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID));
        ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId);

        Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS);
        List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
        Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams());
        Map<String, String> fatherParams = new HashMap<>();
        if (CollectionUtils.isNotEmpty(allParam)) {
            for (Property info : allParam) {
                fatherParams.put(info.getProp(), globalMap.get(info.getProp()));
            }
        }
        String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams);

        return new Command(
                commandType,
                TaskDependType.TASK_POST,
                parentProcessInstance.getFailureStrategy(),
                parentProcessInstance.getExecutorId(),
                processDefinition.getCode(),
                processParam,
                parentProcessInstance.getWarningType(),
                parentProcessInstance.getWarningGroupId(),
                parentProcessInstance.getScheduleTime(),
                task.getWorkerGroup(),
                task.getEnvironmentCode(),
                parentProcessInstance.getProcessInstancePriority()
        );
    }

    /**
     * initialize sub work flow state
     * child instance state would be initialized when 'recovery from pause/stop/failure'
     */
    private void initSubInstanceState(ProcessInstance childInstance) {
        if (childInstance != null) {
            childInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
            updateProcessInstance(childInstance);
        }
    }

    /**
     * get sub work flow command type
     * child instance exists: child command = fatherCommand
     * child instance does not exist: child command = fatherCommand[0]
     */
    private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) {
        CommandType commandType = parentProcessInstance.getCommandType();
        if (childInstance == null) {
            String fatherHistoryCommand = parentProcessInstance.getHistoryCmd();
            commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]);
        }
        return commandType;
    }

    /**
     * update sub process definition
     *
     * @param parentProcessInstance parentProcessInstance
     * @param childDefinitionCode childDefinitionCode
     */
    private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) {
        ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(),
                parentProcessInstance.getProcessDefinitionVersion());
        ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode);
        if (childDefinition != null && fatherDefinition != null) {
            childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId());
            processDefineMapper.updateById(childDefinition);
        }
    }

    /**
     * submit task instance to the database
     *
     * @param taskInstance taskInstance
     * @param processInstance processInstance
     * @return task instance
     */
    public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) {
        ExecutionStatus processInstanceState = processInstance.getState();

        if (taskInstance.getState().typeIsFailure()) {
            if (taskInstance.isSubProcess()) {
                taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
            } else {
                if (processInstanceState != ExecutionStatus.READY_STOP
                        && processInstanceState != ExecutionStatus.READY_PAUSE) {
                    // failure task set invalid
                    taskInstance.setFlag(Flag.NO);
                    updateTaskInstance(taskInstance);
                    // create new task instance
                    if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) {
                        taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1);
                    }
                    taskInstance.setSubmitTime(null);
                    taskInstance.setStartTime(null);
                    taskInstance.setEndTime(null);
                    taskInstance.setFlag(Flag.YES);
                    taskInstance.setHost(null);
                    taskInstance.setId(0);
                }
            }
        }
        taskInstance.setExecutorId(processInstance.getExecutorId());
        taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority());
        taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState));
        if (taskInstance.getSubmitTime() == null) {
            taskInstance.setSubmitTime(new Date());
        }
        if (taskInstance.getFirstSubmitTime() == null) {
            taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime());
        }
        boolean saveResult = saveTaskInstance(taskInstance);
        if (!saveResult) {
            return null;
        }
        return taskInstance;
    }
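    // Note: for a failed, non-sub-process task the old row is kept but flagged
    // invalid (Flag.NO) and the instance is re-inserted with id 0, so every retry
    // attempt leaves its own row in the task instance table.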
    /**
     * get submit task instance state by the work process state
     * cannot modify the task state when running/kill/submit success,
     * or when the task instance already exists in the task queue.
     * return pause if work process state is ready pause
     * return stop if work process state is ready stop
     * if none of the above are satisfied, return submit success
     *
     * @param taskInstance taskInstance
     * @param processInstanceState processInstanceState
     * @return process instance state
     */
    public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) {
        ExecutionStatus state = taskInstance.getState();
        // running, delayed or killed:
        // the task already exists in the task queue, return the state as-is
        if (
                state == ExecutionStatus.RUNNING_EXECUTION
                        || state == ExecutionStatus.DELAY_EXECUTION
                        || state == ExecutionStatus.KILL
        ) {
            return state;
        }
        // return pause/stop if process instance state is ready pause/stop,
        // otherwise return submit success
        if (processInstanceState == ExecutionStatus.READY_PAUSE) {
            state = ExecutionStatus.PAUSE;
        } else if (processInstanceState == ExecutionStatus.READY_STOP
                || !checkProcessStrategy(taskInstance)) {
            state = ExecutionStatus.KILL;
        } else {
            state = ExecutionStatus.SUBMITTED_SUCCESS;
        }
        return state;
    }

    /**
     * check process instance strategy
     *
     * @param taskInstance taskInstance
     * @return check strategy result
     */
    private boolean checkProcessStrategy(TaskInstance taskInstance) {
        ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId());
        FailureStrategy failureStrategy = processInstance.getFailureStrategy();
        if (failureStrategy == FailureStrategy.CONTINUE) {
            return true;
        }
        List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId());
        for (TaskInstance task : taskInstances) {
            if (task.getState() == ExecutionStatus.FAILURE
                    && task.getRetryTimes() >= task.getMaxRetryTimes()) {
                return false;
            }
        }
        return true;
    }

    /**
     * insert or update work process instance to database
     *
     * @param processInstance processInstance
     */
    public void saveProcessInstance(ProcessInstance processInstance) {
        if (processInstance == null) {
            logger.error("save error, process instance is null!");
            return;
        }
        if (processInstance.getId() != 0) {
            processInstanceMapper.updateById(processInstance);
        } else {
            processInstanceMapper.insert(processInstance);
        }
    }

    /**
     * insert or update command
     *
     * @param command command
     * @return save command result
     */
    public int saveCommand(Command command) {
        if (command.getId() != 0) {
            return commandMapper.updateById(command);
        } else {
            return commandMapper.insert(command);
        }
    }

    /**
     * insert or update task instance
     *
     * @param taskInstance taskInstance
     * @return save task instance result
     */
    public boolean saveTaskInstance(TaskInstance taskInstance) {
        if (taskInstance.getId() != 0) {
            return updateTaskInstance(taskInstance);
        } else {
            return createTaskInstance(taskInstance);
        }
    }

    /**
     * insert task instance
     *
     * @param taskInstance taskInstance
     * @return create task instance result
     */
    public boolean createTaskInstance(TaskInstance taskInstance) {
        int count = taskInstanceMapper.insert(taskInstance);
        return count > 0;
    }

    /**
     * update task instance
     *
     * @param taskInstance taskInstance
     * @return update task instance result
     */
    public boolean updateTaskInstance(TaskInstance taskInstance) {
        int count = taskInstanceMapper.updateById(taskInstance);
        return count > 0;
    }

    /**
     * find task instance by id
     *
     * @param taskId task id
     * @return task instance
     */
    public TaskInstance findTaskInstanceById(Integer taskId) {
        return taskInstanceMapper.selectById(taskId);
    }

    /**
     * package task instance, associate processInstance and processDefine
     *
     * @param taskInstId taskInstId
     * @return task instance
     */
    public TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) {
        // get task instance
        TaskInstance taskInstance = findTaskInstanceById(taskInstId);
        if (taskInstance == null) {
            return null;
        }
        // get process instance
        ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId());
        // get process define
        ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(),
                processInstance.getProcessDefinitionVersion());
        taskInstance.setProcessInstance(processInstance);
        taskInstance.setProcessDefine(processDefine);
        TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(
                taskInstance.getTaskCode(),
                taskInstance.getTaskDefinitionVersion());
        taskInstance.setTaskDefine(taskDefinition);
        return taskInstance;
    }

    /**
     * get id list by task state
     *
     * @param instanceId instanceId
     * @param state state
     * @return task instance states
     */
    public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) {
        return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal());
    }

    /**
     * find valid task list by process instance id
     *
     * @param processInstanceId processInstanceId
     * @return task instance list
     */
    public List<TaskInstance> findValidTaskListByProcessId(Integer processInstanceId) {
        return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES);
    }

    /**
     * find previous task list by work process id
     *
     * @param processInstanceId processInstanceId
     * @return task instance list
     */
    public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) {
        return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO);
    }

    /**
     * update work process instance map
     *
     * @param processInstanceMap processInstanceMap
     * @return update process instance result
     */
    public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
        return processInstanceMapMapper.updateById(processInstanceMap);
    }

    /**
     * create work process instance map
     *
     * @param processInstanceMap processInstanceMap
     * @return create process instance result
     */
    public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) {
        int count = 0;
        if (processInstanceMap != null) {
            return processInstanceMapMapper.insert(processInstanceMap);
        }
        return count;
    }
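    // Note: the map table is keyed by (parent process instance id, parent task
    // instance id); findWorkProcessMapByParent() below assumes at most one row per
    // such pair, which setProcessInstanceMap() relies on when reusing entries.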
    /**
     * find work process map by parent process id and parent task id.
     *
     * @param parentWorkProcessId parentWorkProcessId
     * @param parentTaskId parentTaskId
     * @return process instance map
     */
    public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) {
        return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId);
    }

    /**
     * delete work process map by parent process id
     *
     * @param parentWorkProcessId parentWorkProcessId
     * @return delete process map result
     */
    public int deleteWorkProcessMapByParentId(int parentWorkProcessId) {
        return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId);
    }

    /**
     * find sub process instance
     *
     * @param parentProcessId parentProcessId
     * @param parentTaskId parentTaskId
     * @return process instance
     */
    public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) {
        ProcessInstance processInstance = null;
        ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId);
        if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
            return processInstance;
        }
        processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId());
        return processInstance;
    }

    /**
     * find parent process instance
     *
     * @param subProcessId subProcessId
     * @return process instance
     */
    public ProcessInstance findParentProcessInstance(Integer subProcessId) {
        ProcessInstance processInstance = null;
        ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId);
        if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) {
            return processInstance;
        }
        processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId());
        return processInstance;
    }

    /**
     * change task state
     *
     * @param state state
     * @param startTime startTime
     * @param host host
     * @param executePath executePath
     * @param logPath logPath
     * @param taskInstId taskInstId
     */
    public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host,
                                String executePath,
                                String logPath,
                                int taskInstId) {
        taskInstance.setState(state);
        taskInstance.setStartTime(startTime);
        taskInstance.setHost(host);
        taskInstance.setExecutePath(executePath);
        taskInstance.setLogPath(logPath);
        saveTaskInstance(taskInstance);
    }

    /**
     * update process instance
     *
     * @param processInstance processInstance
     * @return update process instance result
     */
    public int updateProcessInstance(ProcessInstance processInstance) {
        return processInstanceMapper.updateById(processInstance);
    }

    /**
     * change task state
     *
     * @param state state
     * @param endTime endTime
     * @param taskInstId taskInstId
     * @param varPool varPool
     */
    public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state,
                                Date endTime,
                                int processId,
                                String appIds,
                                int taskInstId,
                                String varPool) {
        taskInstance.setPid(processId);
        taskInstance.setAppLink(appIds);
        taskInstance.setState(state);
        taskInstance.setEndTime(endTime);
        taskInstance.setVarPool(varPool);
        changeOutParam(taskInstance);
        saveTaskInstance(taskInstance);
    }

    /**
     * for show in page of taskInstance
     *
     * @param taskInstance
     */
    public void changeOutParam(TaskInstance taskInstance) {
        if (StringUtils.isEmpty(taskInstance.getVarPool())) {
            return;
        }
        List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class);
        if (CollectionUtils.isEmpty(properties)) {
            return;
        }
        // if the result has more than one line, just get the first one
        Map<String, Object> taskParams = JSONUtils.toMap(taskInstance.getTaskParams(), String.class, Object.class);
        Object localParams = taskParams.get(LOCAL_PARAMS);
        if (localParams == null) {
            return;
        }
        List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class);
        Map<String, String> outProperty = new HashMap<>();
        for (Property info : properties) {
            if (info.getDirect() == Direct.OUT) {
                outProperty.put(info.getProp(), info.getValue());
            }
        }
        for (Property info : allParam) {
            if (info.getDirect() == Direct.OUT) {
                String paramName = info.getProp();
                info.setValue(outProperty.get(paramName));
            }
        }
        taskParams.put(LOCAL_PARAMS, allParam);
        taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams));
    }

    /**
     * convert integer list to string list
     *
     * @param intList intList
     * @return string list
     */
    public List<String> convertIntListToString(List<Integer> intList) {
        if (intList == null) {
            return new ArrayList<>();
        }
        List<String> result = new ArrayList<>(intList.size());
        for (Integer intVar : intList) {
            result.add(String.valueOf(intVar));
        }
        return result;
    }

    /**
     * query schedule by id
     *
     * @param id id
     * @return schedule
     */
    public Schedule querySchedule(int id) {
        return scheduleMapper.selectById(id);
    }

    /**
     * query Schedule by processDefinitionCode
     *
     * @param processDefinitionCode processDefinitionCode
     * @see Schedule
     */
    public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) {
        return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode);
    }

    /**
     * query need failover process instance
     *
     * @param host host
     * @return process instance list
     */
    public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) {
        return processInstanceMapper.queryByHostAndStatus(host, stateArray);
    }

    /**
     * process need failover process instance
     *
     * @param processInstance processInstance
     */
    @Transactional(rollbackFor = RuntimeException.class)
    public void processNeedFailoverProcessInstances(ProcessInstance processInstance) {
        //1 update processInstance host is null
        processInstance.setHost(Constants.NULL);
        processInstanceMapper.updateById(processInstance);

        ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(),
                processInstance.getProcessDefinitionVersion());
        //2 insert into recover command
        Command cmd = new Command();
        cmd.setProcessDefinitionCode(processDefinition.getCode());
        cmd.setCommandParam(String.format("{\"%s\":%d}", Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId()));
        cmd.setExecutorId(processInstance.getExecutorId());
        cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS);
        createCommand(cmd);
    }
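    // Note: failover intentionally clears the host before inserting the recover
    // command; the command parameter is a one-field JSON object, e.g.
    // {"<CMD_PARAM_RECOVER_PROCESS_ID_STRING>": 123}, where the key is the literal
    // defined in Constants and the value is the process instance id.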
instance by the task id * * @param taskId taskId * @return process instance */ public ProcessInstance findProcessInstanceByTaskId(int taskId) { TaskInstance taskInstance = taskInstanceMapper.selectById(taskId); if (taskInstance != null) { return processInstanceMapper.selectById(taskInstance.getProcessInstanceId()); } return null; } /** * find udf function list by id list string * * @param ids ids * @return udf function list */ public List<UdfFunc> queryUdfFunListByIds(int[] ids) { return udfFuncMapper.queryUdfByIdStr(ids, null); } /** * find tenant code by resource name * * @param resName resource name * @param resourceType resource type * @return tenant code */ public String queryTenantCodeByResName(String resName, ResourceType resourceType) { // in order to query tenant code successful although the version is older String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName); List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { return StringUtils.EMPTY; } int userId = resourceList.get(0).getUserId(); User user = userMapper.selectById(userId); if (Objects.isNull(user)) { return StringUtils.EMPTY; } Tenant tenant = tenantMapper.selectById(user.getTenantId()); if (Objects.isNull(tenant)) { return StringUtils.EMPTY; } return tenant.getTenantCode(); } /** * find schedule list by process define codes. * * @param codes codes * @return schedule list */ public List<Schedule> selectAllByProcessDefineCode(long[] codes) { return scheduleMapper.selectAllByProcessDefineArray(codes); } /** * find last scheduler process instance in the date interval * * @param definitionCode definitionCode * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastSchedulerProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last manual process instance interval * * @param definitionCode process definition code * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastManualProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last running process instance * * @param definitionCode process definition code * @param startTime start time * @param endTime end time * @return process instance */ public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) { return processInstanceMapper.queryLastRunningProcess(definitionCode, startTime, endTime, stateArray); } /** * query user queue by process instance id * * @param processInstanceId processInstanceId * @return queue */ public String queryUserQueueByProcessInstanceId(int processInstanceId) { String queue = ""; ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId); if (processInstance == null) { return queue; } User executor = userMapper.selectById(processInstance.getExecutorId()); if (executor != null) { queue = executor.getQueue(); } return queue; } /** * query project name and user name by processInstanceId. 
* * @param processInstanceId processInstanceId * @return projectName and userName */ public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) { return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId); } /** * get task worker group * * @param taskInstance taskInstance * @return workerGroupId */ public String getTaskWorkerGroup(TaskInstance taskInstance) { String workerGroup = taskInstance.getWorkerGroup(); if (StringUtils.isNotBlank(workerGroup)) { return workerGroup; } int processInstanceId = taskInstance.getProcessInstanceId(); ProcessInstance processInstance = findProcessInstanceById(processInstanceId); if (processInstance != null) { return processInstance.getWorkerGroup(); } logger.info("task : {} will use default worker group", taskInstance.getId()); return Constants.DEFAULT_WORKER_GROUP; } /** * get have perm project list * * @param userId userId * @return project list */ public List<Project> getProjectListHavePerm(int userId) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId); List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId); if (createProjects == null) { createProjects = new ArrayList<>(); } if (authedProjects != null) { createProjects.addAll(authedProjects); } return createProjects; } /** * list unauthorized udf function * * @param userId user id * @param needChecks data source id array * @return unauthorized udf function list */ public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) { List<T> resultList = new ArrayList<>(); if (Objects.nonNull(needChecks) && needChecks.length > 0) { Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks)); switch (authorizationType) { case RESOURCE_FILE_ID: case UDF_FILE: List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks); addAuthorizedResources(ownUdfResources, userId); Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet()); originResSet.removeAll(authorizedResourceFiles); break; case RESOURCE_FILE_NAME: List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks); addAuthorizedResources(ownResources, userId); Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet()); originResSet.removeAll(authorizedResources); break; case DATASOURCE: Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet()); originResSet.removeAll(authorizedDatasources); break; case UDF: Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet()); originResSet.removeAll(authorizedUdfs); break; default: break; } resultList.addAll(originResSet); } return resultList; } /** * get user by user id * * @param userId user id * @return User */ public User getUserById(int userId) { return userMapper.selectById(userId); } /** * get resource by resource id * * @param resourceId resource id * @return Resource */ public Resource getResourceById(int resourceId) { return resourceMapper.selectById(resourceId); } /** * list resources by ids * * @param resIds resIds * @return resource list */ public List<Resource> listResourceByIds(Integer[] resIds) { return resourceMapper.listResourceByIds(resIds); } /** * format task app id in task instance */ public String formatTaskAppId(TaskInstance taskInstance) { 
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId()); if (processInstance == null) { return ""; } ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (definition == null) { return ""; } return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId()); } /** * switch process definition version to process definition log version */ public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) { if (null == processDefinition || null == processDefinitionLog) { return Constants.DEFINITION_FAILURE; } processDefinitionLog.setId(processDefinition.getId()); processDefinitionLog.setReleaseState(ReleaseState.OFFLINE); processDefinitionLog.setFlag(Flag.YES); int result = processDefineMapper.updateById(processDefinitionLog); if (result > 0) { result = switchProcessTaskRelationVersion(processDefinition); if (result <= 0) { return Constants.DEFINITION_FAILURE; } } return result; } public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode()); } List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); return processTaskRelationMapper.batchInsert(processTaskRelationLogList); } /** * get resource ids * * @param taskDefinition taskDefinition * @return resource ids */ public String getResourceIds(TaskDefinition taskDefinition) { Set<Integer> resourceIds = null; AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams()); if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) { resourceIds = params.getResourceFilesList(). 
stream() .filter(t -> t.getId() != 0) .map(ResourceInfo::getId) .collect(Collectors.toSet()); } if (CollectionUtils.isEmpty(resourceIds)) { return StringUtils.EMPTY; } return StringUtils.join(resourceIds, ","); } public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) { Date now = new Date(); List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>(); List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperateTime(now); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog)); if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) { TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper .queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion()); if (definitionCodeAndVersion != null) { if (!taskDefinitionLog.equals(definitionCodeAndVersion)) { taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId()); Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode()); taskDefinitionLog.setVersion(version + 1); taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime()); updateTaskDefinitionLogs.add(taskDefinitionLog); } continue; } } taskDefinitionLog.setUserId(operator.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); if (taskDefinitionLog.getCode() == 0) { try { taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); return Constants.DEFINITION_FAILURE; } } newTaskDefinitionLogs.add(taskDefinitionLog); } int insertResult = 0; int updateResult = 0; for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) { TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode()); if (task == null) { newTaskDefinitionLogs.add(taskDefinitionToUpdate); } else { insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate); taskDefinitionToUpdate.setId(task.getId()); updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate); } } if (!newTaskDefinitionLogs.isEmpty()) { updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs); insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs); } return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS; } /** * save processDefinition (including create or update processDefinition) */ public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) { ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1; processDefinitionLog.setVersion(insertVersion); processDefinitionLog.setReleaseState(isFromProcessDefine ? 
ReleaseState.OFFLINE : ReleaseState.ONLINE); processDefinitionLog.setOperator(operator.getId()); processDefinitionLog.setOperateTime(processDefinition.getUpdateTime()); int insertLog = processDefineLogMapper.insert(processDefinitionLog); int result; if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { processDefinitionLog.setId(processDefinition.getId()); result = processDefineMapper.updateById(processDefinitionLog); } return (insertLog & result) > 0 ? insertVersion : 0; } /** * save task relations */ public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion, List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null; if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) { taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog)); } Date now = new Date(); for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { processTaskRelationLog.setProjectCode(projectCode); processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode); processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion); if (taskDefinitionLogMap != null) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode()); if (taskDefinitionLog != null) { processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion()); } processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion()); } processTaskRelationLog.setCreateTime(now); processTaskRelationLog.setUpdateTime(now); processTaskRelationLog.setOperator(operator.getId()); processTaskRelationLog.setOperateTime(now); } List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet()); Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet()); if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) { return Constants.EXIT_CODE_SUCCESS; } processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode); } int result = processTaskRelationMapper.batchInsert(taskRelationList); int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList); return (result & resultLog) > 0 ? 
Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE; } public boolean isTaskOnline(long taskCode) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode); if (!processTaskRelationList.isEmpty()) { Set<Long> processDefinitionCodes = processTaskRelationList .stream() .map(ProcessTaskRelation::getProcessDefinitionCode) .collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes); // check process definition is already online for (ProcessDefinition processDefinition : processDefinitionList) { if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { return true; } } } return false; } /** * Generate the DAG Graph based on the process definition id * * @param processDefinition process definition * @return dag graph */ public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList()); ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations)); // Generate concrete Dag to be executed return DagHelper.buildDagGraph(processDag); } /** * generate DagData */ public DagData genDagData(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations); List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream() .map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class)) .collect(Collectors.toList()); return new DagData(processDefinition, processTaskRelations, taskDefinitions); } public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) { Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion())); } if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); } /** * find task definition by code and version */ public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) { return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion); } /** * find process task relation list by projectCode and processDefinitionCode */ public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) { return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); } /** * add authorized resources * * @param ownResources own resources * @param userId userId */ private void addAuthorizedResources(List<Resource> ownResources, int userId) { List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7); List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? 
resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>(); ownResources.addAll(relationResources); } /** * Use temporarily before refactoring taskNode */ public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, List<Long>> taskCodeMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> { if (v == null) { v = new ArrayList<>(); } if (processTaskRelation.getPreTaskCode() != 0L) { v.add(processTaskRelation.getPreTaskCode()); } return v; }); } if (CollectionUtils.isEmpty(taskDefinitionLogs)) { taskDefinitionLogs = genTaskDefineList(taskRelationList); } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); List<TaskNode> taskNodeList = new ArrayList<>(); for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey()); if (taskDefinitionLog != null) { TaskNode taskNode = new TaskNode(); taskNode.setCode(taskDefinitionLog.getCode()); taskNode.setVersion(taskDefinitionLog.getVersion()); taskNode.setName(taskDefinitionLog.getName()); taskNode.setDesc(taskDefinitionLog.getDescription()); taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase()); taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN); taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes()); taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval()); Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams()); taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT))); taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT))); taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE))); taskParamsMap.remove(Constants.CONDITION_RESULT); taskParamsMap.remove(Constants.DEPENDENCE); taskNode.setParams(JSONUtils.toJsonString(taskParamsMap)); taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority()); taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup()); taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode()); taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN, taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout()))); taskNode.setDelayTime(taskDefinitionLog.getDelayTime()); taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getName).collect(Collectors.toList()))); taskNodeList.add(taskNode); } } return taskNodeList; } public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) { HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>(); //find sub tasks ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId); if (processInstanceMap == null) { return processTaskMap; } ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId()); if (fatherProcess != null) { 
processTaskMap.put(fatherProcess, fatherTask); } return processTaskMap; } }
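The `transformTask` helper above derives each task's predecessor list purely from the relation rows, using `Map.compute` so that start nodes (pre-task code 0) still get an empty entry. Below is a minimal sketch of that map-building step in isolation, with hypothetical task codes; it is an illustration of the technique, not a call into the real service:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PreTaskMapSketch {
    public static void main(String[] args) {
        // Relations as (preTaskCode, postTaskCode) pairs; 0 marks a start node.
        long[][] relations = {{0L, 2L}, {2L, 3L}};
        Map<Long, List<Long>> taskCodeMap = new HashMap<>();
        for (long[] rel : relations) {
            taskCodeMap.compute(rel[1], (k, v) -> {
                if (v == null) {
                    v = new ArrayList<>();
                }
                if (rel[0] != 0L) {
                    v.add(rel[0]);
                }
                return v;
            });
        }
        // Expected: {2=[], 3=[2]} — task 2 has no predecessors, task 3 depends on task 2.
        System.out.println(taskCodeMap);
    }
}
```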
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,292
[Bug] [Task] Spark task execution failed due to NPE
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

Branch: dev

When executing a Spark task, a NullPointerException occurs because `mainJar` has no `resourceName` field. The master only saves the `resourceId` of `mainJar` in the database, and when the worker fetches the task data from the database it does not query the resource information, so the `mainJar` field of `SparkParameters` looks like this:

```json
{"id": 1}
```

`res` and `resourceName` are lost and become `null` values in the object. This causes an NPE when the `mainJar.getResourceName()` method is executed.

### What you expected to happen

The Spark task executes successfully.

### How to reproduce

Create a new Spark task and run it.

### Anything else

_No response_

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
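A minimal sketch of the failure mode described in the issue above, assuming `ResourceInfo` exposes `getId`/`getResourceName` accessors as the issue text implies; the stub class and the repair step shown here are illustrative, not the actual fix in the linked PR:

```java
// Illustrative stand-in for org.apache.dolphinscheduler.common.process.ResourceInfo;
// only the fields relevant to the bug are modeled.
class ResourceInfoStub {
    private int id;
    private String resourceName; // lost during the DB round trip

    int getId() { return id; }
    void setId(int id) { this.id = id; }
    String getResourceName() { return resourceName; }
}

public class SparkMainJarNpeSketch {
    public static void main(String[] args) {
        // The worker deserializes {"id": 1}: only the id survives.
        ResourceInfoStub mainJar = new ResourceInfoStub();
        mainJar.setId(1);

        // Using the name directly, as the Spark task does, dereferences null.
        String name = mainJar.getResourceName();
        if (name == null) {
            // A worker-side repair would re-query the resource by id here
            // before building the spark-submit command line.
            System.out.println("resourceName missing; re-query resource id=" + mainJar.getId());
        }
    }
}
```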
https://github.com/apache/dolphinscheduler/issues/6292
https://github.com/apache/dolphinscheduler/pull/6295
35312fd498bf5f2059b0e77b68f557c388870310
c4375a54c167f8e96d80c3cc67ad0a607ba2ef7b
"2021-09-22T12:03:42Z"
java
"2021-09-23T14:23:17Z"
dolphinscheduler-service/src/test/java/org/apache/dolphinscheduler/service/process/ProcessServiceTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import static org.mockito.ArgumentMatchers.any; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.quartz.cron.CronUtilsTest; import java.util.ArrayList; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.Mockito; import 
org.mockito.junit.MockitoJUnitRunner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.fasterxml.jackson.databind.JsonNode; import com.google.common.collect.Lists; /** * process service test */ @RunWith(MockitoJUnitRunner.class) public class ProcessServiceTest { private static final Logger logger = LoggerFactory.getLogger(CronUtilsTest.class); @InjectMocks private ProcessService processService; @Mock private CommandMapper commandMapper; @Mock private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Mock private ErrorCommandMapper errorCommandMapper; @Mock private ProcessDefinitionMapper processDefineMapper; @Mock private ProcessInstanceMapper processInstanceMapper; @Mock private UserMapper userMapper; @Mock private TaskInstanceMapper taskInstanceMapper; @Mock private TaskDefinitionLogMapper taskDefinitionLogMapper; @Mock private TaskDefinitionMapper taskDefinitionMapper; @Mock private ProcessTaskRelationMapper processTaskRelationMapper; @Mock private ProcessDefinitionLogMapper processDefineLogMapper; @Test public void testCreateSubCommand() { ProcessInstance parentInstance = new ProcessInstance(); parentInstance.setWarningType(WarningType.SUCCESS); parentInstance.setWarningGroupId(0); TaskInstance task = new TaskInstance(); task.setTaskParams("{\"processDefinitionId\":100}}"); task.setId(10); task.setTaskCode(1L); task.setTaskDefinitionVersion(1); ProcessInstance childInstance = null; ProcessInstanceMap instanceMap = new ProcessInstanceMap(); instanceMap.setParentProcessInstanceId(1); instanceMap.setParentTaskInstanceId(10); Command command; //father history: start; child null == command type: start parentInstance.setHistoryCmd("START_PROCESS"); parentInstance.setCommandType(CommandType.START_PROCESS); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setCode(1L); Mockito.when(processDefineMapper.queryByDefineId(100)).thenReturn(processDefinition); command = processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.START_PROCESS, command.getCommandType()); //father history: start,start failure; child null == command type: start parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); parentInstance.setHistoryCmd("START_PROCESS,START_FAILURE_TASK_PROCESS"); command = processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.START_PROCESS, command.getCommandType()); //father history: scheduler,start failure; child null == command type: scheduler parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); parentInstance.setHistoryCmd("SCHEDULER,START_FAILURE_TASK_PROCESS"); command = processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.SCHEDULER, command.getCommandType()); //father history: complement,start failure; child null == command type: complement String startString = "2020-01-01 00:00:00"; String endString = "2020-01-10 00:00:00"; parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); parentInstance.setHistoryCmd("COMPLEMENT_DATA,START_FAILURE_TASK_PROCESS"); Map<String, String> complementMap = new HashMap<>(); complementMap.put(Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE, startString); complementMap.put(Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE, endString); parentInstance.setCommandParam(JSONUtils.toJsonString(complementMap)); command = 
processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.COMPLEMENT_DATA, command.getCommandType()); JsonNode complementDate = JSONUtils.parseObject(command.getCommandParam()); Date start = DateUtils.stringToDate(complementDate.get(Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE).asText()); Date end = DateUtils.stringToDate(complementDate.get(Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE).asText()); Assert.assertEquals(startString, DateUtils.dateToString(start)); Assert.assertEquals(endString, DateUtils.dateToString(end)); //father history: start,failure,start failure; child not null == command type: start failure childInstance = new ProcessInstance(); parentInstance.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); parentInstance.setHistoryCmd("START_PROCESS,START_FAILURE_TASK_PROCESS"); command = processService.createSubProcessCommand(parentInstance, childInstance, instanceMap, task); Assert.assertEquals(CommandType.START_FAILURE_TASK_PROCESS, command.getCommandType()); } @Test public void testVerifyIsNeedCreateCommand() { List<Command> commands = new ArrayList<>(); Command command = new Command(); command.setCommandType(CommandType.REPEAT_RUNNING); command.setCommandParam("{\"" + CMD_PARAM_RECOVER_PROCESS_ID_STRING + "\":\"111\"}"); commands.add(command); Mockito.when(commandMapper.selectList(null)).thenReturn(commands); Assert.assertFalse(processService.verifyIsNeedCreateCommand(command)); Command command1 = new Command(); command1.setCommandType(CommandType.REPEAT_RUNNING); command1.setCommandParam("{\"" + CMD_PARAM_RECOVER_PROCESS_ID_STRING + "\":\"222\"}"); Assert.assertTrue(processService.verifyIsNeedCreateCommand(command1)); Command command2 = new Command(); command2.setCommandType(CommandType.PAUSE); Assert.assertTrue(processService.verifyIsNeedCreateCommand(command2)); } @Test public void testCreateRecoveryWaitingThreadCommand() { int id = 123; Mockito.when(commandMapper.deleteById(id)).thenReturn(1); ProcessInstance subProcessInstance = new ProcessInstance(); subProcessInstance.setIsSubProcess(Flag.YES); Command originCommand = new Command(); originCommand.setId(id); processService.createRecoveryWaitingThreadCommand(originCommand, subProcessInstance); ProcessInstance processInstance = new ProcessInstance(); processInstance.setId(111); processService.createRecoveryWaitingThreadCommand(null, subProcessInstance); Command recoverCommand = new Command(); recoverCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD); processService.createRecoveryWaitingThreadCommand(recoverCommand, subProcessInstance); Command repeatRunningCommand = new Command(); recoverCommand.setCommandType(CommandType.REPEAT_RUNNING); processService.createRecoveryWaitingThreadCommand(repeatRunningCommand, subProcessInstance); ProcessInstance subProcessInstance2 = new ProcessInstance(); subProcessInstance2.setId(111); subProcessInstance2.setIsSubProcess(Flag.NO); processService.createRecoveryWaitingThreadCommand(repeatRunningCommand, subProcessInstance2); } @Test public void testHandleCommand() { //cannot construct process instance, return null; String host = "127.0.0.1"; int validThreadNum = 1; Command command = new Command(); command.setProcessDefinitionCode(222); command.setCommandType(CommandType.REPEAT_RUNNING); command.setCommandParam("{\"" + CMD_PARAM_RECOVER_PROCESS_ID_STRING + "\":\"111\",\"" + CMD_PARAM_SUB_PROCESS_DEFINE_ID + "\":\"222\"}"); Assert.assertNull(processService.handleCommand(logger, host, validThreadNum, command)); 
//there is not enough thread for this command Command command1 = new Command(); command1.setProcessDefinitionCode(123); command1.setCommandParam("{\"ProcessInstanceId\":222}"); command1.setCommandType(CommandType.START_PROCESS); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setId(123); processDefinition.setName("test"); processDefinition.setVersion(1); processDefinition.setCode(11L); processDefinition.setGlobalParams("[{\"prop\":\"startParam1\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"\"}]"); ProcessInstance processInstance = new ProcessInstance(); processInstance.setId(222); processInstance.setProcessDefinitionCode(11L); processInstance.setProcessDefinitionVersion(1); Mockito.when(processDefineMapper.queryByCode(command1.getProcessDefinitionCode())).thenReturn(processDefinition); Mockito.when(processDefineLogMapper.queryByDefinitionCodeAndVersion(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion())).thenReturn(new ProcessDefinitionLog(processDefinition)); Mockito.when(processInstanceMapper.queryDetailById(222)).thenReturn(processInstance); Assert.assertNotNull(processService.handleCommand(logger, host, validThreadNum, command1)); Command command2 = new Command(); command2.setCommandParam("{\"ProcessInstanceId\":222,\"StartNodeIdList\":\"n1,n2\"}"); command2.setProcessDefinitionCode(123); command2.setCommandType(CommandType.RECOVER_SUSPENDED_PROCESS); Assert.assertNotNull(processService.handleCommand(logger, host, validThreadNum, command2)); Command command3 = new Command(); command3.setProcessDefinitionCode(123); command3.setCommandParam("{\"WaitingThreadInstanceId\":222}"); command3.setCommandType(CommandType.START_FAILURE_TASK_PROCESS); Assert.assertNotNull(processService.handleCommand(logger, host, validThreadNum, command3)); Command command4 = new Command(); command4.setProcessDefinitionCode(123); command4.setCommandParam("{\"WaitingThreadInstanceId\":222,\"StartNodeIdList\":\"n1,n2\"}"); command4.setCommandType(CommandType.REPEAT_RUNNING); Assert.assertNotNull(processService.handleCommand(logger, host, validThreadNum, command4)); Command command5 = new Command(); command5.setProcessDefinitionCode(123); HashMap<String, String> startParams = new HashMap<>(); startParams.put("startParam1", "testStartParam1"); HashMap<String, String> commandParams = new HashMap<>(); commandParams.put(CMD_PARAM_START_PARAMS, JSONUtils.toJsonString(startParams)); command5.setCommandParam(JSONUtils.toJsonString(commandParams)); command5.setCommandType(CommandType.START_PROCESS); ProcessInstance processInstance1 = processService.handleCommand(logger, host, validThreadNum, command5); Assert.assertTrue(processInstance1.getGlobalParams().contains("\"testStartParam1\"")); } @Test public void testGetUserById() { User user = new User(); user.setId(123); Mockito.when(userMapper.selectById(123)).thenReturn(user); Assert.assertEquals(user, processService.getUserById(123)); } @Test public void testFormatTaskAppId() { TaskInstance taskInstance = new TaskInstance(); taskInstance.setId(333); taskInstance.setProcessInstanceId(222); Mockito.when(processService.findProcessInstanceById(taskInstance.getProcessInstanceId())).thenReturn(null); Assert.assertEquals("", processService.formatTaskAppId(taskInstance)); ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setId(111); ProcessInstance processInstance = new ProcessInstance(); processInstance.setId(222); processInstance.setProcessDefinitionVersion(1); 
processInstance.setProcessDefinitionCode(1L); Mockito.when(processService.findProcessInstanceById(taskInstance.getProcessInstanceId())).thenReturn(processInstance); Assert.assertEquals("", processService.formatTaskAppId(taskInstance)); } @Test public void testRecurseFindSubProcessId() { int parentProcessDefineId = 1; long parentProcessDefineCode = 1L; int parentProcessDefineVersion = 1; ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setCode(parentProcessDefineCode); processDefinition.setVersion(parentProcessDefineVersion); Mockito.when(processDefineMapper.selectById(parentProcessDefineId)).thenReturn(processDefinition); long postTaskCode = 2L; int postTaskVersion = 2; List<ProcessTaskRelationLog> relationLogList = new ArrayList<>(); ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(); processTaskRelationLog.setPostTaskCode(postTaskCode); processTaskRelationLog.setPostTaskVersion(postTaskVersion); relationLogList.add(processTaskRelationLog); Mockito.when(processTaskRelationLogMapper.queryByProcessCodeAndVersion(parentProcessDefineCode , parentProcessDefineVersion)).thenReturn(relationLogList); List<TaskDefinitionLog> taskDefinitionLogs = new ArrayList<>(); TaskDefinitionLog taskDefinitionLog1 = new TaskDefinitionLog(); taskDefinitionLog1.setTaskParams("{\"processDefinitionId\": 123}"); taskDefinitionLogs.add(taskDefinitionLog1); Mockito.when(taskDefinitionLogMapper.queryByTaskDefinitions(Mockito.anySet())).thenReturn(taskDefinitionLogs); List<Integer> ids = new ArrayList<>(); processService.recurseFindSubProcessId(parentProcessDefineId, ids); Assert.assertEquals(1, ids.size()); } @Test public void testSwitchVersion() { ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setCode(1L); processDefinition.setProjectCode(1L); processDefinition.setId(123); processDefinition.setName("test"); processDefinition.setVersion(1); ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(); processDefinitionLog.setCode(1L); processDefinitionLog.setVersion(2); Assert.assertEquals(0, processService.switchVersion(processDefinition, processDefinitionLog)); } @Test public void testSaveTaskDefine() { User operator = new User(); operator.setId(-1); operator.setUserType(UserType.GENERAL_USER); long projectCode = 751485690568704L; String taskJson = "[{\"code\":751500437479424,\"name\":\"aa\",\"version\":1,\"description\":\"\",\"delayTime\":0," + "\"taskType\":\"SHELL\",\"taskParams\":{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"sleep 1s\\necho 11\"," + "\"dependence\":{},\"conditionResult\":{\"successNode\":[\"\"],\"failedNode\":[\"\"]},\"waitStartTimeout\":{}}," + "\"flag\":\"YES\",\"taskPriority\":\"MEDIUM\",\"workerGroup\":\"yarn\",\"failRetryTimes\":0,\"failRetryInterval\":1," + "\"timeoutFlag\":\"OPEN\",\"timeoutNotifyStrategy\":\"FAILED\",\"timeout\":1,\"environmentCode\":751496815697920}," + "{\"code\":751516889636864,\"name\":\"bb\",\"description\":\"\",\"taskType\":\"SHELL\",\"taskParams\":{\"resourceList\":[]," + "\"localParams\":[],\"rawScript\":\"echo 22\",\"dependence\":{},\"conditionResult\":{\"successNode\":[\"\"],\"failedNode\":[\"\"]}," + "\"waitStartTimeout\":{}},\"flag\":\"YES\",\"taskPriority\":\"MEDIUM\",\"workerGroup\":\"default\",\"failRetryTimes\":\"0\"," + "\"failRetryInterval\":\"1\",\"timeoutFlag\":\"CLOSE\",\"timeoutNotifyStrategy\":\"\",\"timeout\":0,\"delayTime\":\"0\",\"environmentCode\":-1}]"; List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskJson, 
TaskDefinitionLog.class); TaskDefinitionLog taskDefinition = new TaskDefinitionLog(); taskDefinition.setCode(751500437479424L); taskDefinition.setName("aa"); taskDefinition.setProjectCode(751485690568704L); taskDefinition.setTaskType(TaskType.SHELL.getDesc()); taskDefinition.setUserId(-1); taskDefinition.setVersion(1); taskDefinition.setCreateTime(new Date()); taskDefinition.setUpdateTime(new Date()); Mockito.when(taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskDefinition.getCode(), taskDefinition.getVersion())).thenReturn(taskDefinition); Mockito.when(taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinition.getCode())).thenReturn(1); Mockito.when(taskDefinitionMapper.queryByCode(taskDefinition.getCode())).thenReturn(taskDefinition); int result = processService.saveTaskDefine(operator, projectCode, taskDefinitionLogs); Assert.assertEquals(0, result); } @Test public void testGenDagGraph() { ProcessDefinition processDefinition = new ProcessDefinition(); processDefinition.setCode(1L); processDefinition.setId(123); processDefinition.setName("test"); processDefinition.setVersion(1); processDefinition.setCode(11L); ProcessTaskRelation processTaskRelation = new ProcessTaskRelation(); processTaskRelation.setName("def 1"); processTaskRelation.setProcessDefinitionVersion(1); processTaskRelation.setProjectCode(1L); processTaskRelation.setProcessDefinitionCode(1L); processTaskRelation.setPostTaskCode(3L); processTaskRelation.setPreTaskCode(2L); processTaskRelation.setUpdateTime(new Date()); processTaskRelation.setCreateTime(new Date()); List<ProcessTaskRelation> list = new ArrayList<>(); list.add(processTaskRelation); TaskDefinitionLog taskDefinition = new TaskDefinitionLog(); taskDefinition.setCode(3L); taskDefinition.setName("1-test"); taskDefinition.setProjectCode(1L); taskDefinition.setTaskType(TaskType.SHELL.getDesc()); taskDefinition.setUserId(1); taskDefinition.setVersion(2); taskDefinition.setCreateTime(new Date()); taskDefinition.setUpdateTime(new Date()); TaskDefinitionLog td2 = new TaskDefinitionLog(); td2.setCode(2L); td2.setName("unit-test"); td2.setProjectCode(1L); td2.setTaskType(TaskType.SHELL.getDesc()); td2.setUserId(1); td2.setVersion(1); td2.setCreateTime(new Date()); td2.setUpdateTime(new Date()); List<TaskDefinitionLog> taskDefinitionLogs = new ArrayList<>(); taskDefinitionLogs.add(taskDefinition); taskDefinitionLogs.add(td2); Mockito.when(taskDefinitionLogMapper.queryByTaskDefinitions(any())).thenReturn(taskDefinitionLogs); Mockito.when(processTaskRelationMapper.queryByProcessCode(Mockito.anyLong(), Mockito.anyLong())).thenReturn(list); DAG<String, TaskNode, TaskNodeRelation> stringTaskNodeTaskNodeRelationDAG = processService.genDagGraph(processDefinition); Assert.assertEquals(1, stringTaskNodeTaskNodeRelationDAG.getNodesCount()); } @Test public void testCreateCommand() { Command command = new Command(); command.setProcessDefinitionCode(123); command.setCommandParam("{\"ProcessInstanceId\":222}"); command.setCommandType(CommandType.START_PROCESS); int mockResult = 1; Mockito.when(commandMapper.insert(command)).thenReturn(mockResult); int exeMethodResult = processService.createCommand(command); Assert.assertEquals(mockResult, exeMethodResult); Mockito.verify(commandMapper, Mockito.times(1)).insert(command); } @Test public void testChangeOutParam() { TaskInstance taskInstance = new TaskInstance(); taskInstance.setProcessInstanceId(62); ProcessInstance processInstance = new ProcessInstance(); processInstance.setId(62); 
taskInstance.setVarPool("[{\"direct\":\"OUT\",\"prop\":\"test1\",\"type\":\"VARCHAR\",\"value\":\"\"}]"); taskInstance.setTaskParams("{\"type\":\"MYSQL\",\"datasource\":1,\"sql\":\"select id from tb_test limit 1\"," + "\"udfs\":\"\",\"sqlType\":\"0\",\"sendEmail\":false,\"displayRows\":10,\"title\":\"\"," + "\"groupId\":null,\"localParams\":[{\"prop\":\"test1\",\"direct\":\"OUT\",\"type\":\"VARCHAR\",\"value\":\"12\"}]," + "\"connParams\":\"\",\"preStatements\":[],\"postStatements\":[],\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"]," + "\\\"failedNode\\\":[\\\"\\\"]}\",\"dependence\":\"{}\"}"); processService.changeOutParam(taskInstance); } }
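The `testChangeOutParam` case above drives the var-pool merge implemented in `ProcessService.changeOutParam`; stripped of the entity and JSON plumbing, the merge reduces to overwriting the OUT entries of `localParams` with the values captured in the task's var pool. A simplified sketch with made-up data:

```java
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

// Minimal sketch of the OUT-parameter merge performed by changeOutParam:
// values captured in the task's var pool overwrite the matching OUT entries
// of localParams so the UI shows the actual runtime values.
public class VarPoolMergeSketch {
    public static void main(String[] args) {
        Map<String, String> varPoolOut = new HashMap<>();
        varPoolOut.put("test1", "42"); // prop -> value, direction OUT

        // localParams as (prop, direct, value) triples; only OUT entries change.
        String[][] localParams = {{"test1", "OUT", ""}, {"in1", "IN", "x"}};
        for (String[] p : localParams) {
            if ("OUT".equals(p[1])) {
                p[2] = varPoolOut.get(p[0]);
            }
        }
        // Expected: [[test1, OUT, 42], [in1, IN, x]]
        System.out.println(Arrays.deepToString(localParams));
    }
}
```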
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,320
[Bug] [API] Batch deletion of process definitions fails
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

Batch deletion of process definitions fails.

### What you expected to happen

Batch deletion of process definitions succeeds.

### How to reproduce

Batch delete process definitions.

### Anything else

_No response_

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6320
https://github.com/apache/dolphinscheduler/pull/6321
c4375a54c167f8e96d80c3cc67ad0a607ba2ef7b
99f593cf42d6abd4312c74511f60ac8a2eeafb25
"2021-09-23T12:44:06Z"
java
"2021-09-23T14:43:15Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import org.apache.dolphinscheduler.api.dto.DagDataSchedule; import org.apache.dolphinscheduler.api.dto.treeview.Instance; import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.ProcessDefinitionService; import org.apache.dolphinscheduler.api.service.ProcessInstanceService; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.SchedulerService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.FileUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import 
org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.permission.PermissionCheck; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.commons.lang.StringUtils; import java.io.BufferedOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.collect.Lists; /** * process definition service impl */ @Service public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService { private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class); private static final String RELEASESTATE = "releaseState"; @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionLogMapper processDefinitionLogMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProcessInstanceService processInstanceService; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private ProcessService processService; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private SchedulerService schedulerService; @Autowired private TenantMapper tenantMapper; /** * create process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return create result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> createProcessDefinition(User loginUser, long projectCode, String name, String description, String globalParams, String locations, 
                                                      int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        // check whether the new process definition name already exists
        ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
        if (definition != null) {
            putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
            return result;
        }
        List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class);
        Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson);
        if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) {
            return checkTaskDefinitions;
        }
        List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class);
        Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs);
        if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) {
            return checkRelationJson;
        }
        int tenantId = -1;
        if (!Constants.DEFAULT.equals(tenantCode)) {
            Tenant tenant = tenantMapper.queryByTenantCode(tenantCode);
            if (tenant == null) {
                putMsg(result, Status.TENANT_NOT_EXIST);
                return result;
            }
            tenantId = tenant.getId();
        }
        long processDefinitionCode;
        try {
            processDefinitionCode = SnowFlakeUtils.getInstance().nextId();
        } catch (SnowFlakeException e) {
            putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS);
            return result;
        }
        ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description,
            globalParams, locations, timeout, loginUser.getId(), tenantId);
        return createDagDefine(loginUser, taskRelationList, processDefinition, taskDefinitionLogs);
    }

    private Map<String, Object> createDagDefine(User loginUser,
                                                List<ProcessTaskRelationLog> taskRelationList,
                                                ProcessDefinition processDefinition,
                                                List<TaskDefinitionLog> taskDefinitionLogs) {
        Map<String, Object> result = new HashMap<>();
        int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs);
        if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) {
            logger.info("The task has not changed, so skip");
        }
        if (saveTaskResult == Constants.DEFINITION_FAILURE) {
            putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
            throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR);
        }
        int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true);
        if (insertVersion == 0) {
            putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR);
            throw new ServiceException(Status.CREATE_PROCESS_DEFINITION_ERROR);
        }
        int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(),
            processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs);
        if (insertResult == Constants.EXIT_CODE_SUCCESS) {
            putMsg(result, Status.SUCCESS);
            result.put(Constants.DATA_LIST, processDefinition);
        } else {
            putMsg(result, Status.CREATE_PROCESS_TASK_RELATION_ERROR);
            throw new ServiceException(Status.CREATE_PROCESS_TASK_RELATION_ERROR);
        }
        return result;
    }

    private Map<String, Object> checkTaskDefinitionList(List<TaskDefinitionLog> taskDefinitionLogs, String taskDefinitionJson) {
        Map<String, Object> result = new HashMap<>();
        try {
            if (taskDefinitionLogs.isEmpty()) {
                logger.error("taskDefinitionJson invalid: {}", taskDefinitionJson);
                putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson);
                return result;
            }
            for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) {
                if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) {
                    logger.error("task definition {} parameter invalid", taskDefinitionLog.getName());
                    putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName());
                    return result;
                }
            }
            putMsg(result, Status.SUCCESS);
        } catch (Exception e) {
            result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
            result.put(Constants.MSG, e.getMessage());
        }
        return result;
    }

    private Map<String, Object> checkTaskRelationList(List<ProcessTaskRelationLog> taskRelationList, String taskRelationJson, List<TaskDefinitionLog> taskDefinitionLogs) {
        Map<String, Object> result = new HashMap<>();
        try {
            if (taskRelationList == null || taskRelationList.isEmpty()) {
                logger.error("task relation list is null");
                putMsg(result, Status.DATA_IS_NOT_VALID, taskRelationJson);
                return result;
            }
            List<ProcessTaskRelation> processTaskRelations = taskRelationList.stream()
                .map(processTaskRelationLog -> JSONUtils.parseObject(JSONUtils.toJsonString(processTaskRelationLog), ProcessTaskRelation.class))
                .collect(Collectors.toList());
            List<TaskNode> taskNodeList = processService.transformTask(processTaskRelations, taskDefinitionLogs);
            if (taskNodeList.size() != taskRelationList.size()) {
                Set<Long> postTaskCodes = taskRelationList.stream().map(ProcessTaskRelationLog::getPostTaskCode).collect(Collectors.toSet());
                Set<Long> taskNodeCodes = taskNodeList.stream().map(TaskNode::getCode).collect(Collectors.toSet());
                Collection<Long> codes = CollectionUtils.subtract(postTaskCodes, taskNodeCodes);
                if (CollectionUtils.isNotEmpty(codes)) {
                    logger.error("the task code does not exist");
                    putMsg(result, Status.TASK_DEFINE_NOT_EXIST, StringUtils.join(codes, Constants.COMMA));
                    return result;
                }
            }
            if (graphHasCycle(taskNodeList)) {
                logger.error("process DAG has cycle");
                putMsg(result, Status.PROCESS_NODE_HAS_CYCLE);
                return result;
            }
            // check whether the task relation json is normal
            for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) {
                if (processTaskRelationLog.getPostTaskCode() == 0) {
                    logger.error("the post_task_code or post_task_version can't be zero");
                    putMsg(result, Status.CHECK_PROCESS_TASK_RELATION_ERROR);
                    return result;
                }
            }
            putMsg(result, Status.SUCCESS);
        } catch (Exception e) {
            result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
            result.put(Constants.MSG, e.getMessage());
        }
        return result;
    }

    /**
     * query process definition list
     *
     * @param loginUser login user
     * @param projectCode project code
     * @return definition list
     */
    @Override
    public Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(projectCode);
        List<DagData> dagDataList = resourceList.stream().map(processService::genDagData).collect(Collectors.toList());
        result.put(Constants.DATA_LIST, dagDataList);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process definition list paging
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param searchVal search value
     * @param userId user id
     * @param pageNo page number
     * @param pageSize page size
     * @return process definition page
     */
    @Override
    public Result queryProcessDefinitionListPaging(User loginUser, long projectCode, String searchVal, Integer userId, Integer pageNo, Integer pageSize) {
        Result result = new Result();
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            putMsg(result, resultStatus);
            return result;
        }
        Page<ProcessDefinition> page = new Page<>(pageNo, pageSize);
        IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging(
            page, searchVal, userId, project.getCode(), isAdmin(loginUser));
        List<ProcessDefinition> records = processDefinitionIPage.getRecords();
        for (ProcessDefinition pd : records) {
            ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion());
            User user = userMapper.selectById(processDefinitionLog.getOperator());
            pd.setModifyBy(user.getUserName());
        }
        processDefinitionIPage.setRecords(records);
        PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize);
        pageInfo.setTotal((int) processDefinitionIPage.getTotal());
        pageInfo.setTotalList(processDefinitionIPage.getRecords());
        result.setData(pageInfo);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query detail of process definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @return process definition detail
     */
    @Override
    public Map<String, Object> queryProcessDefinitionByCode(User loginUser, long projectCode, long code) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
        } else {
            Tenant tenant = tenantMapper.queryById(processDefinition.getTenantId());
            if (tenant != null) {
                processDefinition.setTenantCode(tenant.getTenantCode());
            }
            DagData dagData = processService.genDagData(processDefinition);
            result.put(Constants.DATA_LIST, dagData);
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }

    @Override
    public Map<String, Object> queryProcessDefinitionByName(User loginUser, long projectCode, String name) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, name);
        } else {
            DagData dagData = processService.genDagData(processDefinition);
            result.put(Constants.DATA_LIST, dagData);
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }

    /**
     * update process definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param name process definition name
     * @param code process definition code
     * @param description description
     * @param globalParams global params
     * @param locations locations for nodes
     * @param timeout timeout
     * @param tenantCode tenantCode
     * @param taskRelationJson relation json for nodes
     * @param taskDefinitionJson taskDefinitionJson
     * @return update result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> updateProcessDefinition(User loginUser,
                                                       long projectCode,
                                                       String name,
                                                       long code,
                                                       String description,
                                                       String globalParams,
                                                       String locations,
                                                       int timeout,
                                                       String tenantCode,
                                                       String taskRelationJson,
                                                       String taskDefinitionJson) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class);
        Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson);
        if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) {
            return checkTaskDefinitions;
        }
        List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class);
        Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs);
        if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) {
            return checkRelationJson;
        }
        int tenantId = -1;
        if (!Constants.DEFAULT.equals(tenantCode)) {
            Tenant tenant = tenantMapper.queryByTenantCode(tenantCode);
            if (tenant == null) {
                putMsg(result, Status.TENANT_NOT_EXIST);
                return result;
            }
            tenantId = tenant.getId();
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        // check whether the process definition exists
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
            // an online process definition cannot be edited
            putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName());
            return result;
        }
        if (!name.equals(processDefinition.getName())) {
            // check whether the new process definition name already exists
            ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name);
            if (definition != null) {
                putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name);
                return result;
            }
        }
        ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class);
        processDefinition.set(projectCode, name, description, globalParams, locations, timeout, tenantId);
        return updateDagDefine(loginUser, taskRelationList, processDefinition, processDefinitionDeepCopy, taskDefinitionLogs);
    }

    private Map<String, Object> updateDagDefine(User loginUser,
                                                List<ProcessTaskRelationLog> taskRelationList,
                                                ProcessDefinition processDefinition,
                                                ProcessDefinition processDefinitionDeepCopy,
                                                List<TaskDefinitionLog> taskDefinitionLogs) {
        Map<String, Object> result = new HashMap<>();
        int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs);
        if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) {
            logger.info("The task has not changed, so skip");
        }
        if (saveTaskResult == Constants.DEFINITION_FAILURE) {
            putMsg(result, Status.UPDATE_TASK_DEFINITION_ERROR);
            throw new ServiceException(Status.UPDATE_TASK_DEFINITION_ERROR);
        }
        int insertVersion;
        if (processDefinition.equals(processDefinitionDeepCopy)) {
            insertVersion = processDefinitionDeepCopy.getVersion();
        } else {
            processDefinition.setUpdateTime(new Date());
            insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true);
        }
        if (insertVersion == 0) {
            putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
            throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR);
        }
        int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(),
            processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs);
        if (insertResult == Constants.EXIT_CODE_SUCCESS) {
            putMsg(result, Status.SUCCESS);
            result.put(Constants.DATA_LIST, processDefinition);
        } else {
            putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR);
            throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR);
        }
        return result;
    }

    /**
     * verify process definition name unique
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param name name
     * @return true if process definition name not exists, otherwise false
     */
    @Override
    public Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name.trim());
        if (processDefinition == null) {
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name.trim());
        }
        return result;
    }

    /**
     * delete process definition by code
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @return delete result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        // determine whether the login user is the owner of the process definition
        if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) {
            putMsg(result, Status.USER_NO_OPERATION_PERM);
            return result;
        }
        // check whether the process definition is already online
        if (processDefinition.getReleaseState() == ReleaseState.ONLINE) {
            putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, code);
            return result;
        }
        // check whether any process instances are still running
        List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES);
        if (CollectionUtils.isNotEmpty(processInstances)) {
            putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_CODE_FAIL, processInstances.size());
            return result;
        }
        // get the timing according to the process definition
        List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(code);
        if (!schedules.isEmpty() && schedules.size() > 1) {
            logger.warn("schedule num is {}, greater than 1", schedules.size());
            putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
            return result;
        } else if (schedules.size() == 1) {
            Schedule schedule = schedules.get(0);
            if (schedule.getReleaseState() == ReleaseState.OFFLINE) {
                int delete = scheduleMapper.deleteById(schedule.getId());
                if (delete == 0) {
                    putMsg(result, Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
                    throw new ServiceException(Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR);
                }
            } else if (schedule.getReleaseState() == ReleaseState.ONLINE) {
                putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId());
                return result;
            }
        }
        int delete = processDefinitionMapper.deleteById(processDefinition.getId());
        int deleteRelation = processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode());
        if ((delete & deleteRelation) == 0) {
            putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
            throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * release process definition: online / offline
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @param releaseState release state
     * @return release result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        // check state
        if (null == releaseState) {
            putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        switch (releaseState) {
            case ONLINE:
                // check whether the resources are already cancel-authorized or deleted
                String resourceIds = processDefinition.getResourceIds();
                if (StringUtils.isNotBlank(resourceIds)) {
                    Integer[] resourceIdArray = Arrays.stream(resourceIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new);
                    PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger);
                    try {
                        permissionCheck.checkPermission();
                    } catch (Exception e) {
                        logger.error(e.getMessage(), e);
                        putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE);
                        return result;
                    }
                }
                processDefinition.setReleaseState(releaseState);
                processDefinitionMapper.updateById(processDefinition);
                break;
            case OFFLINE:
                processDefinition.setReleaseState(releaseState);
                int updateProcess = processDefinitionMapper.updateById(processDefinition);
                List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray(
                    new long[]{processDefinition.getCode()}
                );
                if (updateProcess > 0 && scheduleList.size() == 1) {
                    Schedule schedule = scheduleList.get(0);
                    logger.info("set schedule offline, project id: {}, schedule id: {}, process definition code: {}", project.getId(), schedule.getId(), code);
                    // set status
                    schedule.setReleaseState(ReleaseState.OFFLINE);
                    int updateSchedule = scheduleMapper.updateById(schedule);
                    if (updateSchedule == 0) {
                        putMsg(result, Status.OFFLINE_SCHEDULE_ERROR);
                        throw new ServiceException(Status.OFFLINE_SCHEDULE_ERROR);
                    }
                    schedulerService.deleteSchedule(project.getId(), schedule.getId());
                }
                break;
            default:
                putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE);
                return result;
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * batch export process definition by codes
     */
    @Override
    public void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response) {
        if (StringUtils.isEmpty(codes)) {
            return;
        }
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return;
        }
        Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet());
        List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet);
        List<DagDataSchedule> dagDataSchedules = processDefinitionList.stream().map(this::exportProcessDagData).collect(Collectors.toList());
        if (CollectionUtils.isNotEmpty(dagDataSchedules)) {
            downloadProcessDefinitionFile(response, dagDataSchedules);
        }
    }

    /**
     * download the process definition file
     */
    private void downloadProcessDefinitionFile(HttpServletResponse response, List<DagDataSchedule> dagDataSchedules) {
        response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE);
        BufferedOutputStream buff = null;
        ServletOutputStream out = null;
        try {
            out = response.getOutputStream();
            buff = new BufferedOutputStream(out);
            buff.write(JSONUtils.toJsonString(dagDataSchedules).getBytes(StandardCharsets.UTF_8));
            buff.flush();
            buff.close();
        } catch (IOException e) {
            logger.warn("export process fail", e);
        } finally {
            if (null != buff) {
                try {
                    buff.close();
                } catch (Exception e) {
                    logger.warn("export process buffer not close", e);
                }
            }
            if (null != out) {
                try {
                    out.close();
                } catch (Exception e) {
                    logger.warn("export process output stream not close", e);
                }
            }
        }
    }

    /**
     * get export process dag data
     *
     * @param processDefinition process definition
     * @return DagDataSchedule
     */
    public DagDataSchedule exportProcessDagData(ProcessDefinition processDefinition) {
        List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(processDefinition.getCode());
        DagDataSchedule dagDataSchedule = new DagDataSchedule(processService.genDagData(processDefinition));
        if (!schedules.isEmpty()) {
            Schedule schedule = schedules.get(0);
            schedule.setReleaseState(ReleaseState.OFFLINE);
            dagDataSchedule.setSchedule(schedule);
        }
        return dagDataSchedule;
    }

    /**
     * import process definition
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param file process metadata json file
     * @return import process
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> importProcessDefinition(User loginUser, long projectCode, MultipartFile file) {
        Map<String, Object> result = new HashMap<>();
        String dagDataScheduleJson = FileUtils.file2String(file);
        List<DagDataSchedule> dagDataScheduleList = JSONUtils.toList(dagDataScheduleJson, DagDataSchedule.class);
        //check file content
        if (CollectionUtils.isEmpty(dagDataScheduleList)) {
            putMsg(result, Status.DATA_IS_NULL, "fileContent");
            return result;
        }
        for (DagDataSchedule dagDataSchedule : dagDataScheduleList) {
            if (!checkAndImport(loginUser, projectCode, result, dagDataSchedule)) {
                return result;
            }
        }
        return result;
    }

    /**
     * check and import
     */
    private boolean checkAndImport(User loginUser, long projectCode, Map<String, Object> result, DagDataSchedule dagDataSchedule) {
        if (!checkImportanceParams(dagDataSchedule, result)) {
            return false;
        }
        ProcessDefinition processDefinition = dagDataSchedule.getProcessDefinition();
        //unique check
        Map<String, Object> checkResult = verifyProcessDefinitionName(loginUser, projectCode, processDefinition.getName());
        if (Status.SUCCESS.equals(checkResult.get(Constants.STATUS))) {
            putMsg(result, Status.SUCCESS);
        } else {
            result.putAll(checkResult);
            return false;
        }
        String processDefinitionName = recursionProcessDefinitionName(projectCode, processDefinition.getName(), 1);
        processDefinition.setName(processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp());
        processDefinition.setUserId(loginUser.getId());
        try {
            processDefinition.setCode(SnowFlakeUtils.getInstance().nextId());
        } catch (SnowFlakeException e) {
            putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR);
            return false;
        }
        List<TaskDefinition> taskDefinitionList = dagDataSchedule.getTaskDefinitionList();
        Map<Long, Long> taskCodeMap = new HashMap<>();
        Date now = new Date();
        List<TaskDefinitionLog> taskDefinitionLogList = new ArrayList<>();
        for (TaskDefinition taskDefinition : taskDefinitionList) {
            TaskDefinitionLog taskDefinitionLog = new TaskDefinitionLog(taskDefinition);
            taskDefinitionLog.setName(taskDefinitionLog.getName() + "_import_" + DateUtils.getCurrentTimeStamp());
            taskDefinitionLog.setProjectCode(projectCode);
            taskDefinitionLog.setUserId(loginUser.getId());
            taskDefinitionLog.setVersion(Constants.VERSION_FIRST);
            taskDefinitionLog.setCreateTime(now);
            taskDefinitionLog.setUpdateTime(now);
            taskDefinitionLog.setOperator(loginUser.getId());
            taskDefinitionLog.setOperateTime(now);
            try {
                long code = SnowFlakeUtils.getInstance().nextId();
                taskCodeMap.put(taskDefinitionLog.getCode(), code);
                taskDefinitionLog.setCode(code);
            } catch (SnowFlakeException e) {
                logger.error("Task code get error, ", e);
                putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code");
                return false;
            }
            taskDefinitionLogList.add(taskDefinitionLog);
        }
        int insert = taskDefinitionMapper.batchInsert(taskDefinitionLogList);
        int logInsert = taskDefinitionLogMapper.batchInsert(taskDefinitionLogList);
        if ((logInsert & insert) == 0) {
            putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR);
            throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR);
        }
        List<ProcessTaskRelation> taskRelationList = dagDataSchedule.getProcessTaskRelationList();
        List<ProcessTaskRelationLog> taskRelationLogList = new ArrayList<>();
        for (ProcessTaskRelation processTaskRelation : taskRelationList) {
            ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(processTaskRelation);
            processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode()));
            processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode()));
            processTaskRelationLog.setPreTaskVersion(Constants.VERSION_FIRST);
            processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST);
            taskRelationLogList.add(processTaskRelationLog);
        }
        Map<String, Object> createDagResult = createDagDefine(loginUser, taskRelationLogList, processDefinition, Lists.newArrayList());
        if (Status.SUCCESS.equals(createDagResult.get(Constants.STATUS))) {
            putMsg(createDagResult, Status.SUCCESS);
        } else {
            result.putAll(createDagResult);
            throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR);
        }
        Schedule schedule = dagDataSchedule.getSchedule();
        if (null != schedule) {
            ProcessDefinition newProcessDefinition = processDefinitionMapper.queryByCode(processDefinition.getCode());
            schedule.setProcessDefinitionCode(newProcessDefinition.getCode());
            schedule.setUserId(loginUser.getId());
            schedule.setCreateTime(now);
            schedule.setUpdateTime(now);
            int scheduleInsert = scheduleMapper.insert(schedule);
            if (0 == scheduleInsert) {
                putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR);
                throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR);
            }
        }
        return true;
    }

    /**
     * check importance params
     */
    private boolean checkImportanceParams(DagDataSchedule dagDataSchedule, Map<String, Object> result) {
        if (dagDataSchedule.getProcessDefinition() == null) {
            putMsg(result, Status.DATA_IS_NULL, "ProcessDefinition");
            return false;
        }
        if (CollectionUtils.isEmpty(dagDataSchedule.getTaskDefinitionList())) {
            putMsg(result, Status.DATA_IS_NULL, "TaskDefinitionList");
            return false;
        }
        if (CollectionUtils.isEmpty(dagDataSchedule.getProcessTaskRelationList())) {
            putMsg(result, Status.DATA_IS_NULL, "ProcessTaskRelationList");
            return false;
        }
        return true;
    }

    private String recursionProcessDefinitionName(long projectCode, String processDefinitionName, int num) {
        ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName);
        if (processDefinition != null) {
            if (num > 1) {
                String str = processDefinitionName.substring(0, processDefinitionName.length() - 3);
                processDefinitionName = str + "(" + num + ")";
            } else {
                processDefinitionName = processDefinition.getName() + "(" + num + ")";
            }
        } else {
            return processDefinitionName;
        }
        return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1);
    }

    /**
     * check the process task relation json
     *
     * @param processTaskRelationJson process task relation json
     * @return check result code
     */
    @Override
    public Map<String, Object> checkProcessNodeList(String processTaskRelationJson) {
        Map<String, Object> result = new HashMap<>();
        try {
            if (processTaskRelationJson == null) {
                logger.error("process data is null");
                putMsg(result, Status.DATA_IS_NOT_VALID, processTaskRelationJson);
                return result;
            }
            List<ProcessTaskRelation> taskRelationList = JSONUtils.toList(processTaskRelationJson, ProcessTaskRelation.class);
            // check whether the task node is normal
            List<TaskNode> taskNodes = processService.transformTask(taskRelationList, Lists.newArrayList());
            if (CollectionUtils.isEmpty(taskNodes)) {
                logger.error("process node info is empty");
                putMsg(result, Status.PROCESS_DAG_IS_EMPTY);
                return result;
            }
            // check whether the DAG has a cycle
            if (graphHasCycle(taskNodes)) {
                logger.error("process DAG has cycle");
                putMsg(result, Status.PROCESS_NODE_HAS_CYCLE);
                return result;
            }
            // check whether the process definition json is normal
            for (TaskNode taskNode : taskNodes) {
                if (!CheckUtils.checkTaskNodeParameters(taskNode)) {
                    logger.error("task node {} parameter invalid", taskNode.getName());
                    putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName());
                    return result;
                }
                // check extra params
                CheckUtils.checkOtherParams(taskNode.getExtras());
            }
            putMsg(result, Status.SUCCESS);
        } catch (Exception e) {
            result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
            result.put(Constants.MSG, e.getMessage());
        }
        return result;
    }

    /**
     * get task node details based on process definition
     *
     * @param loginUser loginUser
     * @param projectCode project code
     * @param code process definition code
     * @return task node list
     */
    @Override
    public Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null) {
            logger.info("process definition does not exist");
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        DagData dagData = processService.genDagData(processDefinition);
        result.put(Constants.DATA_LIST, dagData.getTaskDefinitionList());
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * get task node details map based on process definition
     *
     * @param loginUser loginUser
     * @param projectCode project code
     * @param codes define codes
     * @return task node list
     */
    @Override
    public Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode, String codes) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet());
        List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet);
        if (CollectionUtils.isEmpty(processDefinitionList)) {
            logger.info("process definitions do not exist");
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes);
            return result;
        }
        Map<Long, List<TaskDefinition>> taskNodeMap = new HashMap<>();
        for (ProcessDefinition processDefinition : processDefinitionList) {
            DagData dagData = processService.genDagData(processDefinition);
            taskNodeMap.put(processDefinition.getCode(), dagData.getTaskDefinitionList());
        }
        result.put(Constants.DATA_LIST, taskNodeMap);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * query process definition all by project code
     *
     * @param loginUser loginUser
     * @param projectCode project code
     * @return process definitions in the project
     */
    @Override
    public Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode);
        List<DagData> dagDataList = processDefinitions.stream().map(processService::genDagData).collect(Collectors.toList());
        result.put(Constants.DATA_LIST, dagDataList);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * Encapsulates the TreeView structure
     *
     * @param code process definition code
     * @param limit limit
     * @return tree view json data
     */
    @Override
    public Map<String, Object> viewTree(long code, Integer limit) {
        Map<String, Object> result = new HashMap<>();
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (null == processDefinition) {
            logger.info("process definition does not exist");
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
            return result;
        }
        DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition);
        // nodes that are running
        Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>();
        // nodes that are waiting to run
        Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>();
        // list of process instances
        List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(code, limit);
        processInstanceList.forEach(processInstance -> processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime())));
        List<TaskDefinitionLog> taskDefinitionList = processService.genTaskDefineList(processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()));
        Map<Long, TaskDefinitionLog> taskDefinitionMap = taskDefinitionList.stream()
            .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog));
        if (limit > processInstanceList.size()) {
            limit = processInstanceList.size();
        }
        TreeViewDto parentTreeViewDto = new TreeViewDto();
        parentTreeViewDto.setName("DAG");
        parentTreeViewDto.setType("");
        parentTreeViewDto.setCode(0L);
        // Specify the process definition, because it is a TreeView for a process definition
        for (int i = limit - 1; i >= 0; i--) {
            ProcessInstance processInstance = processInstanceList.get(i);
            Date endTime = processInstance.getEndTime() == null ? new Date() : processInstance.getEndTime();
            parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), processInstance.getProcessDefinitionCode(),
                "", processInstance.getState().toString(), processInstance.getStartTime(), endTime, processInstance.getHost(),
                DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime())));
        }
        List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>();
        parentTreeViewDtoList.add(parentTreeViewDto);
        // Here is the encapsulation task instance
        for (String startNode : dag.getBeginNode()) {
            runningNodeMap.put(startNode, parentTreeViewDtoList);
        }
        while (Stopper.isRunning()) {
            Set<String> postNodeList;
            Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator();
            while (iter.hasNext()) {
                Map.Entry<String, List<TreeViewDto>> en = iter.next();
                String nodeName = en.getKey();
                parentTreeViewDtoList = en.getValue();
                TreeViewDto treeViewDto = new TreeViewDto();
                treeViewDto.setName(nodeName);
                TaskNode taskNode = dag.getNode(nodeName);
                treeViewDto.setType(taskNode.getType());
                treeViewDto.setCode(taskNode.getCode());
                //set treeViewDto instances
                for (int i = limit - 1; i >= 0; i--) {
                    ProcessInstance processInstance = processInstanceList.get(i);
                    TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), nodeName);
                    if (taskInstance == null) {
                        treeViewDto.getInstances().add(new Instance(-1, "not running", 0, "null"));
                    } else {
                        Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime();
                        Date endTime = taskInstance.getEndTime() == null ? new Date() : taskInstance.getEndTime();
                        int subProcessId = 0;
                        // if the task is a sub process, return the sub process id; otherwise sub id = 0
                        if (taskInstance.isSubProcess()) {
                            TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode());
                            subProcessId = Integer.parseInt(JSONUtils.parseObject(
                                taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_ID).asText());
                        }
                        treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskCode(),
                            taskInstance.getTaskType(), taskInstance.getState().toString(), taskInstance.getStartTime(), taskInstance.getEndTime(),
                            taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId));
                    }
                }
                for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) {
                    pTreeViewDto.getChildren().add(treeViewDto);
                }
                postNodeList = dag.getSubsequentNodes(nodeName);
                if (CollectionUtils.isNotEmpty(postNodeList)) {
                    for (String nextNodeName : postNodeList) {
                        List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName);
                        if (CollectionUtils.isEmpty(treeViewDtoList)) {
                            treeViewDtoList = new ArrayList<>();
                        }
                        treeViewDtoList.add(treeViewDto);
                        waitingRunningNodeMap.put(nextNodeName, treeViewDtoList);
                    }
                }
                runningNodeMap.remove(nodeName);
            }
            if (waitingRunningNodeMap.size() == 0) {
                break;
            } else {
                runningNodeMap.putAll(waitingRunningNodeMap);
                waitingRunningNodeMap.clear();
            }
        }
        result.put(Constants.DATA_LIST, parentTreeViewDto);
        result.put(Constants.STATUS, Status.SUCCESS);
        result.put(Constants.MSG, Status.SUCCESS.getMsg());
        return result;
    }

    /**
     * whether the graph has a cycle
     *
     * @param taskNodeResponseList task node response list
     * @return if graph has cycle flag
     */
    private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) {
        DAG<String, TaskNode, String> graph = new DAG<>();
        // Fill the vertices
        for (TaskNode taskNodeResponse : taskNodeResponseList) {
            graph.addNode(taskNodeResponse.getName(), taskNodeResponse);
        }
        // Fill edge relations
        for (TaskNode taskNodeResponse : taskNodeResponseList) {
            List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class);
            if (CollectionUtils.isNotEmpty(preTasks)) {
                for (String preTask : preTasks) {
                    if (!graph.addEdge(preTask, taskNodeResponse.getName())) {
                        return true;
                    }
                }
            }
        }
        return graph.hasCycle();
    }

    /**
     * batch copy process definition
     *
     * @param loginUser loginUser
     * @param projectCode projectCode
     * @param codes processDefinitionCodes
     * @param targetProjectCode targetProjectCode
     */
    @Override
    public Map<String, Object> batchCopyProcessDefinition(User loginUser,
                                                          long projectCode,
                                                          String codes,
                                                          long targetProjectCode) {
        Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        List<String> failedProcessList = new ArrayList<>();
        doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, true);
        checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, true);
        return result;
    }

    /**
     * batch move process definition
     *
     * @param loginUser loginUser
     * @param projectCode projectCode
     * @param codes processDefinitionCodes
     * @param targetProjectCode targetProjectCode
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> batchMoveProcessDefinition(User loginUser,
                                                          long projectCode,
                                                          String codes,
                                                          long targetProjectCode) {
        Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (projectCode == targetProjectCode) {
            return result;
        }
        List<String> failedProcessList = new ArrayList<>();
        doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, false);
        checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, false);
        return result;
    }

    private Map<String, Object> checkParams(User loginUser,
                                            long projectCode,
                                            String processDefinitionCodes,
                                            long targetProjectCode) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (StringUtils.isEmpty(processDefinitionCodes)) {
            putMsg(result, Status.PROCESS_DEFINITION_CODES_IS_EMPTY, processDefinitionCodes);
            return result;
        }
        if (projectCode != targetProjectCode) {
            Project targetProject = projectMapper.queryByCode(targetProjectCode);
            //check user access for project
            Map<String, Object> targetResult = projectService.checkProjectAndAuth(loginUser, targetProject, targetProjectCode);
            if (targetResult.get(Constants.STATUS) != Status.SUCCESS) {
                return targetResult;
            }
        }
        return result;
    }

    private void doBatchOperateProcessDefinition(User loginUser,
                                                 long targetProjectCode,
                                                 List<String> failedProcessList,
                                                 String processDefinitionCodes,
                                                 Map<String, Object> result,
                                                 boolean isCopy) {
        Set<Long> definitionCodes = Arrays.stream(processDefinitionCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet());
        List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(definitionCodes);
        Set<Long> queryCodes = processDefinitionList.stream().map(ProcessDefinition::getCode).collect(Collectors.toSet());
        // definitionCodes - queryCodes
        Set<Long> diffCode = definitionCodes.stream().filter(code -> !queryCodes.contains(code)).collect(Collectors.toSet());
        diffCode.forEach(code -> failedProcessList.add(code + "[null]"));
        for (ProcessDefinition processDefinition : processDefinitionList) {
            List<ProcessTaskRelation> processTaskRelations =
                processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode());
            List<ProcessTaskRelationLog> taskRelationList = processTaskRelations.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList());
            processDefinition.setProjectCode(targetProjectCode);
            if (isCopy) {
                processDefinition.setName(processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp());
                try {
                    result.putAll(createDagDefine(loginUser, taskRelationList, processDefinition, Lists.newArrayList()));
                } catch (Exception e) {
                    putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR);
                    throw new ServiceException(Status.COPY_PROCESS_DEFINITION_ERROR);
                }
            } else {
                try {
                    result.putAll(updateDagDefine(loginUser, taskRelationList, processDefinition, null, Lists.newArrayList()));
                } catch (Exception e) {
                    putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR);
                    throw new ServiceException(Status.MOVE_PROCESS_DEFINITION_ERROR);
                }
            }
            if (result.get(Constants.STATUS) != Status.SUCCESS) {
                failedProcessList.add(processDefinition.getCode() + "[" + processDefinition.getName() + "]");
            }
        }
    }

    /**
     * switch the defined process definition version
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param code process definition code
     * @param version the version user want to switch
     * @return switch process definition version result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (Objects.isNull(processDefinition)) {
            putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR, code);
            return result;
        }
        ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(code, version);
        if (Objects.isNull(processDefinitionLog)) {
            putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR, processDefinition.getCode(), version);
            return result;
        }
        int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog);
        // switchVersion > 0 means the update succeeded; treat anything else as an error
        if (switchVersion <= 0) {
            putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR);
            throw new ServiceException(Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR);
        }
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * check batch operate result
     *
     * @param srcProjectCode srcProjectCode
     * @param targetProjectCode targetProjectCode
     * @param result result
     * @param failedProcessList failedProcessList
     * @param isCopy isCopy
     */
    private void checkBatchOperateResult(long srcProjectCode, long targetProjectCode,
                                         Map<String, Object> result, List<String> failedProcessList, boolean isCopy) {
        if (!failedProcessList.isEmpty()) {
            if (isCopy) {
                putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList));
            } else {
                putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList));
            }
        } else {
            putMsg(result, Status.SUCCESS);
        }
    }

    /**
     * query the pagination versions info by one certain process definition code
     *
     * @param loginUser login user info to check auth
     * @param projectCode project code
     * @param pageNo page number
     * @param pageSize page size
     * @param code process definition code
     * @return the pagination process definition versions info of the certain process definition
     */
    @Override
    public Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code) {
        Result result = new Result();
        Project project = projectMapper.queryByCode(projectCode);
        // check user access for project
        Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        Status resultStatus = (Status) checkResult.get(Constants.STATUS);
        if (resultStatus != Status.SUCCESS) {
            putMsg(result, resultStatus);
            return result;
        }
        PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize);
        Page<ProcessDefinitionLog> page = new Page<>(pageNo, pageSize);
        IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, code);
        List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords();
        pageInfo.setTotalList(processDefinitionLogs);
        pageInfo.setTotal((int) processDefinitionVersionsPaging.getTotal());
        result.setData(pageInfo);
        putMsg(result, Status.SUCCESS);
        return result;
    }

    /**
     * delete one certain process definition by version number and process definition code
     *
     * @param loginUser login user info to check auth
     * @param projectCode project code
     * @param code process definition code
     * @param version version number
     * @return delete result code
     */
    @Override
    @Transactional(rollbackFor = RuntimeException.class)
    public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code);
        if (processDefinition == null) {
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code);
        } else {
            int deleteLog = processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version);
            int deleteRelationLog = processTaskRelationLogMapper.deleteByCode(processDefinition.getCode(), processDefinition.getVersion());
            if ((deleteLog & deleteRelationLog) == 0) {
                putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
                throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR);
            }
            putMsg(result, Status.SUCCESS);
        }
        return result;
    }
}
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,316
[Bug] [Script] keytab username will not be set correctly if with @ sign
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

If we use the default install.sh and install_config.conf, the keytab username will not be replaced correctly, which fails with

> sed: -e expression #1, char 62: unknown option to `s'

### What you expected to happen

The keytab username can be set correctly.

### How to reproduce

The easiest way is to run the following in a shell:

```shell
keytabUserName=@
sed -i "s@^login.user.keytab.username=.*@login.user.keytab.username=${keytabUserName}@g" conf/common.properties
```

### Anything else

_No response_

### Are you willing to submit PR?

- [X] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
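For reference, a minimal sketch of why the substitution breaks and one conventional way to harden it (assuming GNU sed; the `escapedKeytabUserName` helper variable is illustrative and not necessarily what the actual fix in the linked PR uses):

```shell
# The default value ("hdfs-mycluster@ESZ.COM") contains '@', which is also the
# delimiter of the s command above, so sed parses a malformed expression.
keytabUserName="hdfs-mycluster@ESZ.COM"

# Escape every '@' in the value so sed treats it as a literal character
# rather than as the expression delimiter.
escapedKeytabUserName=$(echo "$keytabUserName" | sed 's/@/\\@/g')
sed -i "s@^login.user.keytab.username=.*@login.user.keytab.username=${escapedKeytabUserName}@g" conf/common.properties
```

The same collision exists for any delimiter character that can legally appear in the configured value, which is why the config file's NOTICE asks users to escape special characters.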
https://github.com/apache/dolphinscheduler/issues/6316
https://github.com/apache/dolphinscheduler/pull/6317
99f593cf42d6abd4312c74511f60ac8a2eeafb25
c855a5926ceb9cfb6efb0c901c8d92fe3774e999
"2021-09-23T11:20:40Z"
java
"2021-09-23T14:43:58Z"
dolphinscheduler-server/src/main/resources/config/install_config.conf
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# NOTICE: If the following config has special characters in the variable `.*[]^${}\+?|()@#&`, Please escape, for example, `[` escape to `\[`

# postgresql or mysql
dbtype="mysql"

# db config
# db address and port
dbhost="192.168.xx.xx:3306"

# db username
username="xx"

# db password
# NOTICE: if there are special characters, please use the \ to escape, for example, `[` escape to `\[`
password="xx"

# database name
dbname="dolphinscheduler"

# zk cluster
zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"

# zk root directory
zkRoot="/dolphinscheduler"

# registry config
# registry plugin dir
# Note: find and load the Registry Plugin Jar from this dir.
registryPluginDir="/data1_1T/dolphinscheduler/lib/plugin/registry"

registryPluginName="zookeeper"
registryServers="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"

# Note: the target installation path for dolphinscheduler, please not config as the same as the current path (pwd)
installPath="/data1_1T/dolphinscheduler"

# deployment user
# Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself
deployUser="dolphinscheduler"

# alert config
# alert plugin dir
# Note: find and load the Alert Plugin Jar from this dir.
alertPluginDir="/data1_1T/dolphinscheduler/lib/plugin/alert"

# user data local directory path, please make sure the directory exists and have read write permissions
dataBasedirPath="/tmp/dolphinscheduler"

# resource storage type: HDFS, S3, NONE
resourceStorageType="NONE"

# resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended
resourceUploadPath="/dolphinscheduler"

# if resourceStorageType is HDFS, defaultFS write namenode address, HA you need to put core-site.xml and hdfs-site.xml in the conf directory.
# if S3, write S3 address, HA, for example: s3a://dolphinscheduler,
# Note: s3 be sure to create the root directory /dolphinscheduler
defaultFS="hdfs://mycluster:8020"

# if resourceStorageType is S3, the following three configuration is required, otherwise please ignore
s3Endpoint="http://192.168.xx.xx:9010"
s3AccessKey="xxxxxxxxxx"
s3SecretKey="xxxxxxxxxx"

# resourcemanager port, the default value is 8088 if not specified
resourceManagerHttpAddressPort="8088"

# if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty
yarnHaIps="192.168.xx.xx,192.168.xx.xx"

# if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname
singleYarnIp="yarnIp1"

# who have permissions to create directory under HDFS/S3 root path
# Note: if kerberos is enabled, please config hdfsRootUser=
hdfsRootUser="hdfs"

# kerberos config
# whether kerberos starts, if kerberos starts, following four items need to config, otherwise please ignore
kerberosStartUp="false"
# kdc krb5 config file path
krb5ConfPath="$installPath/conf/krb5.conf"
# keytab username
keytabUserName="hdfs-mycluster@ESZ.COM"
# username keytab path
keytabPath="$installPath/conf/hdfs.headless.keytab"
# kerberos expire time, the unit is hour
kerberosExpireTime="2"

# use sudo or not
sudoEnable="true"

# worker tenant auto create
workerTenantAutoCreate="false"

# api server port
apiServerPort="12345"

# install hosts
# Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname
ips="ds1,ds2,ds3,ds4,ds5"

# ssh port, default 22
# Note: if ssh port is not default, modify here
sshPort="22"

# run master machine
# Note: list of hosts hostname for deploying master
masters="ds1,ds2"

# run worker machine
# note: need to write the worker group name of each worker, the default value is "default"
workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default"

# run alert machine
# note: list of machine hostnames for deploying alert server
alertServer="ds3"

# run api machine
# note: list of machine hostnames for deploying api server
apiServers="ds1"
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,072
[Bug][common] Generics type is invalid in JsonUtils.toMap
**Describe the bug**

This method is supposed to deserialize a JSON string to a Map.

https://github.com/apache/dolphinscheduler/blob/e866d1be86464d812551e8c38ba60767a204c82e/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java#L249-L251

But the generics type in the Map is not what we expected. We can write a simple test case:

```java
String sqlResult = "{\"id\":[],\"test1\":\"6\"}";
Map<String, List> param = toMap(sqlResult, String.class, List.class);
System.out.println(param);
```

It will execute successfully and print

```
{id=[], test1=6}
```

The value type of `id` is `List`, while the value type of `test1` is `String`. But if we loop over the map, it will throw an exception, because the values in this map have different types:

```java
String sqlResult = "{\"id\":[],\"test1\":\"6\"}";
Map<String, List> param = toMap(sqlResult, String.class, List.class);
param.forEach((key, value) -> System.out.println(value));
```

```
[]
Exception in thread "main" java.lang.ClassCastException: java.lang.String cannot be cast to java.util.List
	at java.util.LinkedHashMap.forEach(LinkedHashMap.java:684)
	at org.apache.dolphinscheduler.common.utils.JSONUtils.main(JSONUtils.java:256)
```

I have not found the deep reason, but I think we should remove this method.

**Which version of Dolphin Scheduler:**
-[dev]
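The behavior described is consistent with Java type erasure rather than a Jackson defect: if `toMap` builds its `TypeReference` inside the generic method (an assumption about the implementation, not confirmed by this report), the type variables erase to `Object`, so Jackson only sees `Map<Object, Object>` and applies its natural JSON-to-Java mapping per value (`"6"` becomes a `String`, `[]` becomes an `ArrayList`). A minimal self-contained sketch of both the broken shape and a safer alternative:

```java
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ToMapErasureDemo {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Mirrors the suspected shape of JSONUtils.toMap: K and V are erased, so
    // the TypeReference carries no real type information and the classK/classV
    // arguments never reach Jackson at all.
    public static <K, V> Map<K, V> toMap(String json, Class<K> classK, Class<V> classV) throws Exception {
        return MAPPER.readValue(json, new TypeReference<Map<K, V>>() { });
    }

    public static void main(String[] args) throws Exception {
        String json = "{\"id\":[],\"test1\":\"6\"}";

        // Compiles and appears to work, but "test1" is actually a String inside the map;
        // the ClassCastException only surfaces when a caller uses the value as a List.
        Map<String, List> broken = toMap(json, String.class, List.class);
        System.out.println(broken); // {id=[], test1=6}

        // A caller-side TypeReference that matches reality avoids the hidden cast.
        Map<String, Object> safe = MAPPER.readValue(json, new TypeReference<Map<String, Object>>() { });
        safe.forEach((k, v) -> System.out.println(k + " -> " + v.getClass().getSimpleName()));
        // id -> ArrayList
        // test1 -> String
    }
}
```

Passing the declared value type to Jackson up front (for example via `TypeFactory.constructMapType`) would instead fail fast at parse time whenever a value cannot be converted, which is why removing or replacing the erased-generics overload, as the report suggests, is a reasonable resolution.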
https://github.com/apache/dolphinscheduler/issues/6072
https://github.com/apache/dolphinscheduler/pull/6283
c855a5926ceb9cfb6efb0c901c8d92fe3774e999
46257834792d3316c9f5ef885f38e069e4eb6452
"2021-08-31T10:30:15Z"
java
"2021-09-23T15:06:53Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ExecutorServiceImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.api.service.impl;

import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_PARAMS;
import static org.apache.dolphinscheduler.common.Constants.MAX_TASK_TIMEOUT;

import org.apache.dolphinscheduler.api.enums.ExecuteType;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.ExecutorService;
import org.apache.dolphinscheduler.api.service.MonitorService;
import org.apache.dolphinscheduler.api.service.ProjectService;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.ReleaseState;
import org.apache.dolphinscheduler.common.enums.RunMode;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.WarningType;
import org.apache.dolphinscheduler.common.model.Server;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.dao.entity.Command;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.Project;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper;
import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper;
import org.apache.dolphinscheduler.dao.mapper.ProjectMapper;
import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand;
import org.apache.dolphinscheduler.remote.processor.StateEventCallbackService;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;

import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang.StringUtils;

import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

/**
 * executor service impl
 */
@Service
public class ExecutorServiceImpl extends BaseServiceImpl implements ExecutorService {

    private static final Logger logger = LoggerFactory.getLogger(ExecutorServiceImpl.class);

    @Autowired
    private ProjectMapper projectMapper;

    @Autowired
    private ProjectService projectService;

    @Autowired
    private ProcessDefinitionMapper processDefinitionMapper;

    @Autowired
    private MonitorService monitorService;

    @Autowired
    private ProcessInstanceMapper processInstanceMapper;

    @Autowired
    private ProcessService processService;

    @Autowired
    StateEventCallbackService stateEventCallbackService;

    /**
     * execute process instance
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processDefinitionCode process definition code
     * @param cronTime cron time
     * @param commandType command type
     * @param failureStrategy failure strategy
     * @param startNodeList start nodelist
     * @param taskDependType node dependency type
     * @param warningType warning type
     * @param warningGroupId notify group id
     * @param processInstancePriority process instance priority
     * @param workerGroup worker group name
     * @param environmentCode environment code
     * @param runMode run mode
     * @param timeout timeout
     * @param startParams the global param values which pass to new process instance
     * @param expectedParallelismNumber the expected parallelism number when execute complement in parallel mode
     * @return execute process instance code
     */
    @Override
    public Map<String, Object> execProcessInstance(User loginUser, long projectCode,
                                                   long processDefinitionCode, String cronTime, CommandType commandType,
                                                   FailureStrategy failureStrategy, String startNodeList, TaskDependType taskDependType,
                                                   WarningType warningType, int warningGroupId, RunMode runMode,
                                                   Priority processInstancePriority, String workerGroup, Long environmentCode, Integer timeout,
                                                   Map<String, String> startParams, Integer expectedParallelismNumber) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        // timeout is invalid
        if (timeout <= 0 || timeout > MAX_TASK_TIMEOUT) {
            putMsg(result, Status.TASK_TIMEOUT_PARAMS_ERROR);
            return result;
        }
        // check process define release state
        ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefinitionCode);
        result = checkProcessDefinitionValid(processDefinition, processDefinitionCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (!checkTenantSuitable(processDefinition)) {
            logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
                processDefinition.getId(), processDefinition.getName());
            putMsg(result, Status.TENANT_NOT_SUITABLE);
            return result;
        }
        // check master exists
        if (!checkMasterExists(result)) {
            return result;
        }
        /**
         * create command
         */
        int create = this.createCommand(commandType, processDefinition.getCode(),
            taskDependType, failureStrategy, startNodeList, cronTime, warningType, loginUser.getId(),
            warningGroupId, runMode, processInstancePriority, workerGroup, environmentCode, startParams,
            expectedParallelismNumber);
        if (create > 0) {
            processDefinition.setWarningGroupId(warningGroupId);
            processDefinitionMapper.updateById(processDefinition);
            putMsg(result, Status.SUCCESS);
        } else {
            putMsg(result, Status.START_PROCESS_INSTANCE_ERROR);
        }
        return result;
    }

    /**
     * check whether master exists
     *
     * @param result result
     * @return master exists return true , otherwise return false
     */
    private boolean checkMasterExists(Map<String, Object> result) {
        // check master server exists
        List<Server> masterServers = monitorService.getServerListFromRegistry(true);
        // no master
        if (masterServers.isEmpty()) {
            putMsg(result, Status.MASTER_NOT_EXISTS);
            return false;
        }
        return true;
    }

    /**
     * check whether the process definition can be executed
     *
     * @param processDefinition process definition
     * @param processDefineCode process definition code
     * @return check result code
     */
    @Override
    public Map<String, Object> checkProcessDefinitionValid(ProcessDefinition processDefinition, long processDefineCode) {
        Map<String, Object> result = new HashMap<>();
        if (processDefinition == null) {
            // check process definition exists
            putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, processDefineCode);
        } else if (processDefinition.getReleaseState() != ReleaseState.ONLINE) {
            // check process definition online
            putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefineCode);
        } else {
            result.put(Constants.STATUS, Status.SUCCESS);
        }
        return result;
    }

    /**
     * do action to process instance: pause, stop, repeat, recover from pause, recover from stop
     *
     * @param loginUser login user
     * @param projectCode project code
     * @param processInstanceId process instance id
     * @param executeType execute type
     * @return execute result code
     */
    @Override
    public Map<String, Object> execute(User loginUser, long projectCode, Integer processInstanceId, ExecuteType executeType) {
        Project project = projectMapper.queryByCode(projectCode);
        //check user access for project
        Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        // check master exists
        if (!checkMasterExists(result)) {
            return result;
        }
        ProcessInstance processInstance = processService.findProcessInstanceDetailById(processInstanceId);
        if (processInstance == null) {
            putMsg(result, Status.PROCESS_INSTANCE_NOT_EXIST, processInstanceId);
            return result;
        }
        ProcessDefinition processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
            processInstance.getProcessDefinitionVersion());
        if (executeType != ExecuteType.STOP && executeType != ExecuteType.PAUSE) {
            result = checkProcessDefinitionValid(processDefinition, processInstance.getProcessDefinitionCode());
            if (result.get(Constants.STATUS) != Status.SUCCESS) {
                return result;
            }
        }
        result = checkExecuteType(processInstance, executeType);
        if (result.get(Constants.STATUS) != Status.SUCCESS) {
            return result;
        }
        if (!checkTenantSuitable(processDefinition)) {
            logger.error("there is not any valid tenant for the process definition: id:{},name:{}, ",
                processDefinition.getId(), processDefinition.getName());
            putMsg(result, Status.TENANT_NOT_SUITABLE);
        }
        //get the startParams user specified at the first starting while repeat running is needed
        Map<String, Object> commandMap = JSONUtils.toMap(processInstance.getCommandParam(), String.class, Object.class);
        String startParams = null;
        if (MapUtils.isNotEmpty(commandMap) && executeType == ExecuteType.REPEAT_RUNNING) {
            Object startParamsJson = commandMap.get(Constants.CMD_PARAM_START_PARAMS);
            if (startParamsJson != null) {
                startParams = startParamsJson.toString();
            }
        }
        switch
(executeType) { case REPEAT_RUNNING: result = insertCommand(loginUser, processInstanceId, processDefinition.getCode(), CommandType.REPEAT_RUNNING, startParams); break; case RECOVER_SUSPENDED_PROCESS: result = insertCommand(loginUser, processInstanceId, processDefinition.getCode(), CommandType.RECOVER_SUSPENDED_PROCESS, startParams); break; case START_FAILURE_TASK_PROCESS: result = insertCommand(loginUser, processInstanceId, processDefinition.getCode(), CommandType.START_FAILURE_TASK_PROCESS, startParams); break; case STOP: if (processInstance.getState() == ExecutionStatus.READY_STOP) { putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState()); } else { result = updateProcessInstancePrepare(processInstance, CommandType.STOP, ExecutionStatus.READY_STOP); } break; case PAUSE: if (processInstance.getState() == ExecutionStatus.READY_PAUSE) { putMsg(result, Status.PROCESS_INSTANCE_ALREADY_CHANGED, processInstance.getName(), processInstance.getState()); } else { result = updateProcessInstancePrepare(processInstance, CommandType.PAUSE, ExecutionStatus.READY_PAUSE); } break; default: logger.error("unknown execute type : {}", executeType); putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "unknown execute type"); break; } return result; } /** * check tenant suitable * * @param processDefinition process definition * @return true if tenant suitable, otherwise return false */ private boolean checkTenantSuitable(ProcessDefinition processDefinition) { Tenant tenant = processService.getTenantForProcess(processDefinition.getTenantId(), processDefinition.getUserId()); return tenant != null; } /** * Check the state of process instance and the type of operation match * * @param processInstance process instance * @param executeType execute type * @return check result code */ private Map<String, Object> checkExecuteType(ProcessInstance processInstance, ExecuteType executeType) { Map<String, Object> result = new HashMap<>(); ExecutionStatus executionStatus = processInstance.getState(); boolean checkResult = false; switch (executeType) { case PAUSE: case STOP: if (executionStatus.typeIsRunning()) { checkResult = true; } break; case REPEAT_RUNNING: if (executionStatus.typeIsFinished()) { checkResult = true; } break; case START_FAILURE_TASK_PROCESS: if (executionStatus.typeIsFailure()) { checkResult = true; } break; case RECOVER_SUSPENDED_PROCESS: if (executionStatus.typeIsPause() || executionStatus.typeIsCancel()) { checkResult = true; } break; default: break; } if (!checkResult) { putMsg(result, Status.PROCESS_INSTANCE_STATE_OPERATION_ERROR, processInstance.getName(), executionStatus.toString(), executeType.toString()); } else { putMsg(result, Status.SUCCESS); } return result; } /** * prepare to update process instance command type and status * * @param processInstance process instance * @param commandType command type * @param executionStatus execute status * @return update result */ private Map<String, Object> updateProcessInstancePrepare(ProcessInstance processInstance, CommandType commandType, ExecutionStatus executionStatus) { Map<String, Object> result = new HashMap<>(); processInstance.setCommandType(commandType); processInstance.addHistoryCmd(commandType); processInstance.setState(executionStatus); int update = processService.updateProcessInstance(processInstance); // determine whether the process is normal if (update > 0) { String host = processInstance.getHost(); String address = host.split(":")[0]; int port = Integer.parseInt(host.split(":")[1]); 
StateEventChangeCommand stateEventChangeCommand = new StateEventChangeCommand( processInstance.getId(), 0, processInstance.getState(), processInstance.getId(), 0 ); stateEventCallbackService.sendResult(address, port, stateEventChangeCommand.convert2Command()); putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR); } return result; } /** * insert command, used in the implementation of the page, re run, recovery (pause / failure) execution * * @param loginUser login user * @param instanceId instance id * @param processDefinitionCode process definition code * @param commandType command type * @return insert result code */ private Map<String, Object> insertCommand(User loginUser, Integer instanceId, long processDefinitionCode, CommandType commandType, String startParams) { Map<String, Object> result = new HashMap<>(); //To add startParams only when repeat running is needed Map<String, Object> cmdParam = new HashMap<>(); cmdParam.put(CMD_PARAM_RECOVER_PROCESS_ID_STRING, instanceId); if (!StringUtils.isEmpty(startParams)) { cmdParam.put(CMD_PARAM_START_PARAMS, startParams); } Command command = new Command(); command.setCommandType(commandType); command.setProcessDefinitionCode(processDefinitionCode); command.setCommandParam(JSONUtils.toJsonString(cmdParam)); command.setExecutorId(loginUser.getId()); if (!processService.verifyIsNeedCreateCommand(command)) { putMsg(result, Status.PROCESS_INSTANCE_EXECUTING_COMMAND, processDefinitionCode); return result; } int create = processService.createCommand(command); if (create > 0) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.EXECUTE_PROCESS_INSTANCE_ERROR); } return result; } /** * check if sub processes are offline before starting process definition * * @param processDefinitionCode process definition code * @return check result code */ @Override public Map<String, Object> startCheckByProcessDefinedCode(long processDefinitionCode) { Map<String, Object> result = new HashMap<>(); ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(processDefinitionCode); if (processDefinition == null) { logger.error("process definition is not found"); putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, "processDefinitionCode"); return result; } List<Integer> ids = new ArrayList<>(); processService.recurseFindSubProcessId(processDefinition.getId(), ids); Integer[] idArray = ids.toArray(new Integer[ids.size()]); if (!ids.isEmpty()) { List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryDefinitionListByIdList(idArray); if (processDefinitionList != null) { for (ProcessDefinition processDefinitionTmp : processDefinitionList) { /** * if there is no online process, exit directly */ if (processDefinitionTmp.getReleaseState() != ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_NOT_RELEASE, processDefinitionTmp.getName()); logger.info("not release process definition id: {} , name : {}", processDefinitionTmp.getId(), processDefinitionTmp.getName()); return result; } } } } putMsg(result, Status.SUCCESS); return result; } /** * create command * * @param commandType commandType * @param processDefineCode processDefineCode * @param nodeDep nodeDep * @param failureStrategy failureStrategy * @param startNodeList startNodeList * @param schedule schedule * @param warningType warningType * @param executorId executorId * @param warningGroupId warningGroupId * @param runMode runMode * @param processInstancePriority processInstancePriority * @param workerGroup workerGroup * 
@param environmentCode environmentCode * @return command id */ private int createCommand(CommandType commandType, long processDefineCode, TaskDependType nodeDep, FailureStrategy failureStrategy, String startNodeList, String schedule, WarningType warningType, int executorId, int warningGroupId, RunMode runMode, Priority processInstancePriority, String workerGroup, Long environmentCode, Map<String, String> startParams, Integer expectedParallelismNumber) { /** * instantiate command schedule instance */ Command command = new Command(); Map<String, String> cmdParam = new HashMap<>(); if (commandType == null) { command.setCommandType(CommandType.START_PROCESS); } else { command.setCommandType(commandType); } command.setProcessDefinitionCode(processDefineCode); if (nodeDep != null) { command.setTaskDependType(nodeDep); } if (failureStrategy != null) { command.setFailureStrategy(failureStrategy); } if (!StringUtils.isEmpty(startNodeList)) { cmdParam.put(CMD_PARAM_START_NODE_NAMES, startNodeList); } if (warningType != null) { command.setWarningType(warningType); } if (startParams != null && startParams.size() > 0) { cmdParam.put(CMD_PARAM_START_PARAMS, JSONUtils.toJsonString(startParams)); } command.setCommandParam(JSONUtils.toJsonString(cmdParam)); command.setExecutorId(executorId); command.setWarningGroupId(warningGroupId); command.setProcessInstancePriority(processInstancePriority); command.setWorkerGroup(workerGroup); command.setEnvironmentCode(environmentCode); Date start = null; Date end = null; if (!StringUtils.isEmpty(schedule)) { String[] interval = schedule.split(","); if (interval.length == 2) { start = DateUtils.getScheduleDate(interval[0]); end = DateUtils.getScheduleDate(interval[1]); } } // determine whether to complement if (commandType == CommandType.COMPLEMENT_DATA) { runMode = (runMode == null) ? RunMode.RUN_MODE_SERIAL : runMode; if (null != start && null != end && !start.after(end)) { if (runMode == RunMode.RUN_MODE_SERIAL) { cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start)); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(end)); command.setCommandParam(JSONUtils.toJsonString(cmdParam)); return processService.createCommand(command); } else if (runMode == RunMode.RUN_MODE_PARALLEL) { List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(processDefineCode); LinkedList<Date> listDate = new LinkedList<>(); if (!CollectionUtils.isEmpty(schedules)) { for (Schedule item : schedules) { listDate.addAll(CronUtils.getSelfFireDateList(start, end, item.getCrontab())); } } if (!CollectionUtils.isEmpty(listDate)) { int effectThreadsCount = expectedParallelismNumber == null ? listDate.size() : Math.min(listDate.size(), expectedParallelismNumber); logger.info("In parallel mode, current expectedParallelismNumber:{}", effectThreadsCount); int chunkSize = listDate.size() / effectThreadsCount; listDate.addFirst(start); listDate.addLast(end); for (int i = 0; i < effectThreadsCount; i++) { int rangeStart = i == 0 ? i : (i * chunkSize); int rangeEnd = i == effectThreadsCount - 1 ? 
listDate.size() - 1 : rangeStart + chunkSize + 1; cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(listDate.get(rangeStart))); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(listDate.get(rangeEnd))); command.setCommandParam(JSONUtils.toJsonString(cmdParam)); processService.createCommand(command); } return effectThreadsCount; } else { // loop by day int runCunt = 0; while (!start.after(end)) { runCunt += 1; cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, DateUtils.dateToString(start)); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, DateUtils.dateToString(start)); command.setCommandParam(JSONUtils.toJsonString(cmdParam)); processService.createCommand(command); start = DateUtils.getSomeDay(start, 1); } return runCunt; } } } else { logger.error("there is not valid schedule date for the process definition code:{}", processDefineCode); } } else { command.setCommandParam(JSONUtils.toJsonString(cmdParam)); return processService.createCommand(command); } return 0; } }
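
In the parallel complement branch of `createCommand` above, `expectedParallelismNumber` caps how many complement commands are generated, and the scheduler fire dates are split into that many contiguous ranges, one command per range. A minimal standalone sketch of the even-split idea (illustrative names; not the exact boundary arithmetic of the method, which also prepends/appends the overall start and end dates):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Simplified illustration of the even-split idea behind the parallel
// complement branch in createCommand; the real code works on fire dates
// and uses slightly different boundary arithmetic.
public class EvenSplitSketch {

    static <T> List<List<T>> split(List<T> items, Integer expectedParallelism) {
        if (items.isEmpty()) {
            return new ArrayList<>();
        }
        // cap the number of generated pieces, like effectThreadsCount above
        int pieces = expectedParallelism == null
                ? items.size()
                : Math.min(items.size(), expectedParallelism);
        int chunkSize = items.size() / pieces;
        List<List<T>> ranges = new ArrayList<>();
        for (int i = 0; i < pieces; i++) {
            int from = i * chunkSize;
            // the last range absorbs the remainder left by integer division
            int to = (i == pieces - 1) ? items.size() : from + chunkSize;
            ranges.add(items.subList(from, to));
        }
        return ranges;
    }

    public static void main(String[] args) {
        List<Integer> days = Arrays.asList(1, 2, 3, 4, 5, 6, 7);
        // three ranges instead of seven single runs: [1, 2], [3, 4], [5, 6, 7]
        split(days, 3).forEach(System.out::println);
    }
}
```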
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,072
[Bug][common] Generic types are invalid in JSONUtils.toMap
**Describe the bug** This method is meant to deserialize a JSON string into a Map. https://github.com/apache/dolphinscheduler/blob/e866d1be86464d812551e8c38ba60767a204c82e/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java#L249-L251 However, the generic types of the resulting Map are not what we expect. We can write a simple test case: ```java String sqlResult = "{\"id\":[],\"test1\":\"6\"}"; Map<String, List> param = toMap(sqlResult, String.class, List.class); System.out.println(param); ``` It executes successfully and prints ``` {id=[], test1=6} ``` The value of `id` is a `List`, while the value of `test1` is a `String`. But if we iterate over the map, it throws an exception, because the values in this map have different runtime types: ```java String sqlResult = "{\"id\":[],\"test1\":\"6\"}"; Map<String, List> param = toMap(sqlResult, String.class, List.class); param.forEach((key, value) -> System.out.println(value)); ``` ``` [] Exception in thread "main" java.lang.ClassCastException: java.lang.String cannot be cast to java.util.List at java.util.LinkedHashMap.forEach(LinkedHashMap.java:684) at org.apache.dolphinscheduler.common.utils.JSONUtils.main(JSONUtils.java:256) ``` I have not tracked down the root cause, but I think we should remove this method. **Which version of Dolphin Scheduler:** -[dev]
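
The root cause the reporter could not pin down is Java type erasure: inside a generic method, `new TypeReference<Map<K, V>>() {}` captures only the type variables `K` and `V`, which Jackson can resolve no further than `Object`, so every value keeps its natural JSON type instead of being converted to the declared `V`. A self-contained sketch contrasting the erased pattern with an explicit `MapType` built via Jackson's `TypeFactory` (class and method names here are illustrative, not from the codebase or the fix PR):

```java
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.MapType;

public class ToMapErasureDemo {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    // The buggy pattern: K and V are erased, so Jackson only ever sees
    // Map<Object, Object> and keeps whatever natural types the JSON has.
    static <K, V> Map<K, V> erasedToMap(String json, Class<K> k, Class<V> v) throws Exception {
        return MAPPER.readValue(json, new TypeReference<Map<K, V>>() { });
    }

    // Building the JavaType explicitly keeps K and V at runtime, so every
    // value is converted to (or rejected as) the requested V.
    static <K, V> Map<K, V> typedToMap(String json, Class<K> k, Class<V> v) throws Exception {
        MapType type = MAPPER.getTypeFactory().constructMapType(Map.class, k, v);
        return MAPPER.readValue(json, type);
    }

    public static void main(String[] args) throws Exception {
        String json = "{\"id\":[],\"test1\":\"6\"}";

        Map<String, List> erased = erasedToMap(json, String.class, List.class);
        try {
            // The compiler inserts a checkcast to List on each value, and
            // "6" is really a String, so this reproduces the reported CCE.
            erased.forEach((key, value) -> System.out.println(value));
        } catch (ClassCastException expected) {
            System.out.println("erased variant lied about V: " + expected.getMessage());
        }

        try {
            typedToMap(json, String.class, List.class);
        } catch (JsonMappingException expected) {
            // Fails fast at parse time instead: "6" is not a List.
            System.out.println("typed variant rejects the input: " + expected.getOriginalMessage());
        }
    }
}
```

This is why `System.out.println(param)` in the report succeeds while `forEach` fails: printing never casts the values, whereas iterating with a `List`-typed lambda parameter does.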
https://github.com/apache/dolphinscheduler/issues/6072
https://github.com/apache/dolphinscheduler/pull/6283
c855a5926ceb9cfb6efb0c901c8d92fe3774e999
46257834792d3316c9f5ef885f38e069e4eb6452
"2021-08-31T10:30:15Z"
java
"2021-09-23T15:06:53Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/model/TaskNode.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common.model; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.commons.lang.StringUtils; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; import com.fasterxml.jackson.databind.annotation.JsonDeserialize; import com.fasterxml.jackson.databind.annotation.JsonSerialize; public class TaskNode { /** * task node id */ private String id; /** * task node code */ private long code; /** * task node version */ private int version; /** * task node name */ private String name; /** * task node description */ private String desc; /** * task node type */ private String type; /** * the run flag has two states, NORMAL or FORBIDDEN */ private String runFlag; /** * the front field */ private String loc; /** * maximum number of retries */ private int maxRetryTimes; /** * Unit of retry interval: points */ private int retryInterval; /** * params information */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String params; /** * inner dependency information */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String preTasks; /** * node dependency list */ private List<PreviousTaskNode> preTaskNodeList; /** * users store additional information */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String extras; /** * node dependency list */ private List<String> depList; /** * outer dependency information */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String dependence; @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String conditionResult; @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String switchResult; @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String waitStartTimeout; /** * task instance priority */ private Priority taskInstancePriority; /** * worker group */ private String workerGroup; /** * 
environment code */ private Long environmentCode; /** * task time out */ @JsonDeserialize(using = JSONUtils.JsonDataDeserializer.class) @JsonSerialize(using = JSONUtils.JsonDataSerializer.class) private String timeout; /** * delay execution time. */ private int delayTime; public String getId() { return id; } public void setId(String id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getDesc() { return desc; } public void setDesc(String desc) { this.desc = desc; } public String getType() { return type; } public void setType(String type) { this.type = type; } public String getParams() { return params; } public void setParams(String params) { this.params = params; } public String getPreTasks() { return preTasks; } public void setPreTasks(String preTasks) { this.preTasks = preTasks; this.depList = JSONUtils.toList(preTasks, String.class); } public String getExtras() { return extras; } public void setExtras(String extras) { this.extras = extras; } public List<String> getDepList() { return depList; } public void setDepList(List<String> depList) { if (depList != null) { this.depList = depList; this.preTasks = JSONUtils.toJsonString(depList); } } public String getLoc() { return loc; } public void setLoc(String loc) { this.loc = loc; } public String getRunFlag() { return runFlag; } public void setRunFlag(String runFlag) { this.runFlag = runFlag; } public Boolean isForbidden() { return (!StringUtils.isEmpty(this.runFlag) && this.runFlag.equals(Constants.FLOWNODE_RUN_FLAG_FORBIDDEN)); } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } TaskNode taskNode = (TaskNode) o; return Objects.equals(name, taskNode.name) && Objects.equals(desc, taskNode.desc) && Objects.equals(type, taskNode.type) && Objects.equals(params, taskNode.params) && Objects.equals(preTasks, taskNode.preTasks) && Objects.equals(extras, taskNode.extras) && Objects.equals(runFlag, taskNode.runFlag) && Objects.equals(dependence, taskNode.dependence) && Objects.equals(workerGroup, taskNode.workerGroup) && Objects.equals(environmentCode, taskNode.environmentCode) && Objects.equals(conditionResult, taskNode.conditionResult) && CollectionUtils.equalLists(depList, taskNode.depList); } @Override public int hashCode() { return Objects.hash(name, desc, type, params, preTasks, extras, depList, runFlag); } public String getDependence() { return dependence; } public void setDependence(String dependence) { this.dependence = dependence; } public int getMaxRetryTimes() { return maxRetryTimes; } public void setMaxRetryTimes(int maxRetryTimes) { this.maxRetryTimes = maxRetryTimes; } public int getRetryInterval() { return retryInterval; } public void setRetryInterval(int retryInterval) { this.retryInterval = retryInterval; } public Priority getTaskInstancePriority() { return taskInstancePriority; } public void setTaskInstancePriority(Priority taskInstancePriority) { this.taskInstancePriority = taskInstancePriority; } public String getTimeout() { return timeout; } public void setTimeout(String timeout) { this.timeout = timeout; } public String getWorkerGroup() { return workerGroup; } public void setWorkerGroup(String workerGroup) { this.workerGroup = workerGroup; } public String getConditionResult() { return conditionResult; } public void setConditionResult(String conditionResult) { this.conditionResult = conditionResult; } public int getDelayTime() { return delayTime; } public void 
setDelayTime(int delayTime) { this.delayTime = delayTime; } public long getCode() { return code; } public void setCode(long code) { this.code = code; } public int getVersion() { return version; } public void setVersion(int version) { this.version = version; } /** * get task time out parameter * * @return task time out parameter */ public TaskTimeoutParameter getTaskTimeoutParameter() { if (!StringUtils.isEmpty(this.getTimeout())) { String formatStr = String.format("%s,%s", TaskTimeoutStrategy.WARN.name(), TaskTimeoutStrategy.FAILED.name()); String taskTimeout = this.getTimeout().replace(formatStr, TaskTimeoutStrategy.WARNFAILED.name()); return JSONUtils.parseObject(taskTimeout, TaskTimeoutParameter.class); } return new TaskTimeoutParameter(false); } public boolean isConditionsTask() { return TaskType.CONDITIONS.getDesc().equalsIgnoreCase(this.getType()); } public boolean isSwitchTask() { return TaskType.SWITCH.toString().equalsIgnoreCase(this.getType()); } public List<PreviousTaskNode> getPreTaskNodeList() { return preTaskNodeList; } public void setPreTaskNodeList(List<PreviousTaskNode> preTaskNodeList) { this.preTaskNodeList = preTaskNodeList; } public String getTaskParams() { Map<String, Object> taskParams = JSONUtils.toMap(this.params, String.class, Object.class); if (taskParams == null) { taskParams = new HashMap<>(); } taskParams.put(Constants.CONDITION_RESULT, this.conditionResult); taskParams.put(Constants.DEPENDENCE, this.dependence); taskParams.put(Constants.SWITCH_RESULT, this.switchResult); taskParams.put(Constants.WAIT_START_TIMEOUT, this.waitStartTimeout); return JSONUtils.toJsonString(taskParams); } public Map<String, Object> taskParamsToJsonObj(String taskParams) { Map<String, Object> taskParamsMap = JSONUtils.toMap(taskParams, String.class, Object.class); if (taskParamsMap == null) { taskParamsMap = new HashMap<>(); } return taskParamsMap; } @Override public String toString() { return "TaskNode{" + "id='" + id + '\'' + ", code=" + code + ", version=" + version + ", name='" + name + '\'' + ", desc='" + desc + '\'' + ", type='" + type + '\'' + ", runFlag='" + runFlag + '\'' + ", loc='" + loc + '\'' + ", maxRetryTimes=" + maxRetryTimes + ", retryInterval=" + retryInterval + ", params='" + params + '\'' + ", preTasks='" + preTasks + '\'' + ", preTaskNodeList=" + preTaskNodeList + ", extras='" + extras + '\'' + ", depList=" + depList + ", dependence='" + dependence + '\'' + ", conditionResult='" + conditionResult + '\'' + ", taskInstancePriority=" + taskInstancePriority + ", workerGroup='" + workerGroup + '\'' + ", environmentCode=" + environmentCode + ", timeout='" + timeout + '\'' + ", delayTime=" + delayTime + '}'; } public void setEnvironmentCode(Long environmentCode) { this.environmentCode = environmentCode; } public Long getEnvironmentCode() { return this.environmentCode; } public String getSwitchResult() { return switchResult; } public void setSwitchResult(String switchResult) { this.switchResult = switchResult; } public String getWaitStartTimeout() { return this.waitStartTimeout; } public void setWaitStartTimeout(String waitStartTimeout) { this.waitStartTimeout = waitStartTimeout; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,072
[Bug][common] Generic types are invalid in JSONUtils.toMap
**Describe the bug** This method is meant to deserialize a JSON string into a Map. https://github.com/apache/dolphinscheduler/blob/e866d1be86464d812551e8c38ba60767a204c82e/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java#L249-L251 However, the generic types of the resulting Map are not what we expect. We can write a simple test case: ```java String sqlResult = "{\"id\":[],\"test1\":\"6\"}"; Map<String, List> param = toMap(sqlResult, String.class, List.class); System.out.println(param); ``` It executes successfully and prints ``` {id=[], test1=6} ``` The value of `id` is a `List`, while the value of `test1` is a `String`. But if we iterate over the map, it throws an exception, because the values in this map have different runtime types: ```java String sqlResult = "{\"id\":[],\"test1\":\"6\"}"; Map<String, List> param = toMap(sqlResult, String.class, List.class); param.forEach((key, value) -> System.out.println(value)); ``` ``` [] Exception in thread "main" java.lang.ClassCastException: java.lang.String cannot be cast to java.util.List at java.util.LinkedHashMap.forEach(LinkedHashMap.java:684) at org.apache.dolphinscheduler.common.utils.JSONUtils.main(JSONUtils.java:256) ``` I have not tracked down the root cause, but I think we should remove this method. **Which version of Dolphin Scheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/6072
https://github.com/apache/dolphinscheduler/pull/6283
c855a5926ceb9cfb6efb0c901c8d92fe3774e999
46257834792d3316c9f5ef885f38e069e4eb6452
"2021-08-31T10:30:15Z"
java
"2021-09-23T15:06:53Z"
dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common.utils; import static java.nio.charset.StandardCharsets.UTF_8; import static com.fasterxml.jackson.databind.DeserializationFeature.ACCEPT_EMPTY_ARRAY_AS_NULL_OBJECT; import static com.fasterxml.jackson.databind.DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES; import static com.fasterxml.jackson.databind.DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_AS_NULL; import static com.fasterxml.jackson.databind.MapperFeature.REQUIRE_SETTERS_FOR_GETTERS; import org.apache.commons.lang.StringUtils; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.TimeZone; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.DeserializationContext; import com.fasterxml.jackson.databind.JsonDeserializer; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.JsonSerializer; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.databind.SerializerProvider; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import com.fasterxml.jackson.databind.type.CollectionType; /** * json utils */ public class JSONUtils { private static final Logger logger = LoggerFactory.getLogger(JSONUtils.class); /** * can use static singleton, inject: just make sure to reuse! 
*/ private static final ObjectMapper objectMapper = new ObjectMapper() .configure(FAIL_ON_UNKNOWN_PROPERTIES, false) .configure(ACCEPT_EMPTY_ARRAY_AS_NULL_OBJECT, true) .configure(READ_UNKNOWN_ENUM_VALUES_AS_NULL, true) .configure(REQUIRE_SETTERS_FOR_GETTERS, true) .setTimeZone(TimeZone.getDefault()); private JSONUtils() { throw new UnsupportedOperationException("Construct JSONUtils"); } public static ArrayNode createArrayNode() { return objectMapper.createArrayNode(); } public static ObjectNode createObjectNode() { return objectMapper.createObjectNode(); } public static JsonNode toJsonNode(Object obj) { return objectMapper.valueToTree(obj); } /** * json representation of object * * @param object object * @param feature feature * @return object to json string */ public static String toJsonString(Object object, SerializationFeature feature) { try { ObjectWriter writer = objectMapper.writer(feature); return writer.writeValueAsString(object); } catch (Exception e) { logger.error("object to json exception!", e); } return null; } /** * This method deserializes the specified Json into an object of the specified class. It is not * suitable to use if the specified class is a generic type since it will not have the generic * type information because of the Type Erasure feature of Java. Therefore, this method should not * be used if the desired type is a generic type. Note that this method works fine if the any of * the fields of the specified object are generics, just the object itself should not be a * generic type. * * @param json the string from which the object is to be deserialized * @param clazz the class of T * @param <T> T * @return an object of type T from the string * classOfT */ public static <T> T parseObject(String json, Class<T> clazz) { if (StringUtils.isEmpty(json)) { return null; } try { return objectMapper.readValue(json, clazz); } catch (Exception e) { logger.error("parse object exception!", e); } return null; } /** * deserialize * * @param src byte array * @param clazz class * @param <T> deserialize type * @return deserialize type */ public static <T> T parseObject(byte[] src, Class<T> clazz) { if (src == null) { return null; } String json = new String(src, UTF_8); return parseObject(json, clazz); } /** * json to list * * @param json json string * @param clazz class * @param <T> T * @return list */ public static <T> List<T> toList(String json, Class<T> clazz) { if (StringUtils.isEmpty(json)) { return Collections.emptyList(); } try { CollectionType listType = objectMapper.getTypeFactory().constructCollectionType(ArrayList.class, clazz); return objectMapper.readValue(json, listType); } catch (Exception e) { logger.error("parse list exception!", e); } return Collections.emptyList(); } /** * check json object valid * * @param json json * @return true if valid */ public static boolean checkJsonValid(String json) { if (StringUtils.isEmpty(json)) { return false; } try { objectMapper.readTree(json); return true; } catch (IOException e) { logger.error("check json object valid exception!", e); } return false; } /** * Method for finding a JSON Object field with specified name in this * node or its child nodes, and returning value it has. * If no matching field is found in this node or its descendants, returns null. 
* * @param jsonNode json node * @param fieldName Name of field to look for * @return Value of first matching node found, if any; null if none */ public static String findValue(JsonNode jsonNode, String fieldName) { JsonNode node = jsonNode.findValue(fieldName); if (node == null) { return null; } return node.asText(); } /** * json to map * {@link #toMap(String, Class, Class)} * * @param json json * @return json to map */ public static Map<String, String> toMap(String json) { return parseObject(json, new TypeReference<Map<String, String>>() {}); } /** * from the key-value generated json to get the str value no matter the real type of value * @param json the json str * @param nodeName key * @return the str value of key */ public static String getNodeString(String json, String nodeName) { try { JsonNode rootNode = objectMapper.readTree(json); return rootNode.has(nodeName) ? rootNode.get(nodeName).toString() : ""; } catch (JsonProcessingException e) { return ""; } } /** * json to map * * @param json json * @param classK classK * @param classV classV * @param <K> K * @param <V> V * @return to map */ public static <K, V> Map<K, V> toMap(String json, Class<K> classK, Class<V> classV) { return parseObject(json, new TypeReference<Map<K, V>>() {}); } /** * json to object * * @param json json string * @param type type reference * @param <T> * @return return parse object */ public static <T> T parseObject(String json, TypeReference<T> type) { if (StringUtils.isEmpty(json)) { return null; } try { return objectMapper.readValue(json, type); } catch (Exception e) { logger.error("json to map exception!", e); } return null; } /** * object to json string * * @param object object * @return json string */ public static String toJsonString(Object object) { try { return objectMapper.writeValueAsString(object); } catch (Exception e) { throw new RuntimeException("Object json deserialization exception.", e); } } /** * serialize to json byte * * @param obj object * @param <T> object type * @return byte array */ public static <T> byte[] toJsonByteArray(T obj) { if (obj == null) { return null; } String json = ""; try { json = toJsonString(obj); } catch (Exception e) { logger.error("json serialize exception.", e); } return json.getBytes(UTF_8); } public static ObjectNode parseObject(String text) { try { if (text.isEmpty()) { return parseObject(text, ObjectNode.class); } else { return (ObjectNode) objectMapper.readTree(text); } } catch (Exception e) { throw new RuntimeException("String json deserialization exception.", e); } } public static ArrayNode parseArray(String text) { try { return (ArrayNode) objectMapper.readTree(text); } catch (Exception e) { throw new RuntimeException("Json deserialization exception.", e); } } /** * json serializer */ public static class JsonDataSerializer extends JsonSerializer<String> { @Override public void serialize(String value, JsonGenerator gen, SerializerProvider provider) throws IOException { gen.writeRawValue(value); } } /** * json data deserializer */ public static class JsonDataDeserializer extends JsonDeserializer<String> { @Override public String deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { JsonNode node = p.getCodec().readTree(p); if (node instanceof TextNode) { return node.asText(); } else { return node.toString(); } } } }
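
In the JSONUtils source above, `toList` already constructs an explicit `CollectionType` before calling `readValue`, while `toMap(String, Class<K>, Class<V>)` goes through an erased `TypeReference` and therefore exhibits the reported bug. A sketch of a map-typed variant in the same style (written standalone here; `toMapChecked` is an illustrative name, not the change the linked PR actually made):

```java
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.MapType;

// Standalone sketch of a type-safe toMap, mirroring how JSONUtils.toList
// builds its CollectionType via the mapper's TypeFactory.
public final class TypedMaps {

    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    private TypedMaps() {
    }

    public static <K, V> Map<K, V> toMapChecked(String json, Class<K> classK, Class<V> classV) {
        if (json == null || json.isEmpty()) {
            return Collections.emptyMap();
        }
        try {
            // constructMapType keeps K and V at runtime, so values that do
            // not match classV fail at parse time instead of at cast time
            MapType mapType = OBJECT_MAPPER.getTypeFactory()
                    .constructMapType(HashMap.class, classK, classV);
            return OBJECT_MAPPER.readValue(json, mapType);
        } catch (Exception e) {
            // swallow and fall through, matching the toList error handling
            return Collections.emptyMap();
        }
    }
}
```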
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,072
[Bug][common] Generic types are invalid in JSONUtils.toMap
**Describe the bug** This method is meant to deserialize a JSON string into a Map. https://github.com/apache/dolphinscheduler/blob/e866d1be86464d812551e8c38ba60767a204c82e/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java#L249-L251 However, the generic types of the resulting Map are not what we expect. We can write a simple test case: ```java String sqlResult = "{\"id\":[],\"test1\":\"6\"}"; Map<String, List> param = toMap(sqlResult, String.class, List.class); System.out.println(param); ``` It executes successfully and prints ``` {id=[], test1=6} ``` The value of `id` is a `List`, while the value of `test1` is a `String`. But if we iterate over the map, it throws an exception, because the values in this map have different runtime types: ```java String sqlResult = "{\"id\":[],\"test1\":\"6\"}"; Map<String, List> param = toMap(sqlResult, String.class, List.class); param.forEach((key, value) -> System.out.println(value)); ``` ``` [] Exception in thread "main" java.lang.ClassCastException: java.lang.String cannot be cast to java.util.List at java.util.LinkedHashMap.forEach(LinkedHashMap.java:684) at org.apache.dolphinscheduler.common.utils.JSONUtils.main(JSONUtils.java:256) ``` I have not tracked down the root cause, but I think we should remove this method. **Which version of Dolphin Scheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/6072
https://github.com/apache/dolphinscheduler/pull/6283
c855a5926ceb9cfb6efb0c901c8d92fe3774e999
46257834792d3316c9f5ef885f38e069e4eb6452
"2021-08-31T10:30:15Z"
java
"2021-09-23T15:06:53Z"
dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/JSONUtilsTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.common.utils; import org.apache.dolphinscheduler.common.enums.DataType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.process.Property; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import org.junit.Assert; import org.junit.Test; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; public class JSONUtilsTest { @Test public void createArrayNodeTest() { Property property = new Property(); property.setProp("ds"); property.setDirect(Direct.IN); property.setType(DataType.VARCHAR); property.setValue("sssssss"); String str = "[{\"prop\":\"ds\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"sssssss\"},{\"prop\":\"ds\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"sssssss\"}]"; JsonNode jsonNode = JSONUtils.toJsonNode(property); ArrayNode arrayNode = JSONUtils.createArrayNode(); ArrayList<JsonNode> objects = new ArrayList<>(); objects.add(jsonNode); objects.add(jsonNode); ArrayNode jsonNodes = arrayNode.addAll(objects); String s = JSONUtils.toJsonString(jsonNodes); Assert.assertEquals(s, str); } @Test public void toJsonNodeTest() { Property property = new Property(); property.setProp("ds"); property.setDirect(Direct.IN); property.setType(DataType.VARCHAR); property.setValue("sssssss"); String str = "{\"prop\":\"ds\",\"direct\":\"IN\",\"type\":\"VARCHAR\",\"value\":\"sssssss\"}"; JsonNode jsonNodes = JSONUtils.toJsonNode(property); String s = JSONUtils.toJsonString(jsonNodes); Assert.assertEquals(s, str); } @Test public void createObjectNodeTest() { String jsonStr = "{\"a\":\"b\",\"b\":\"d\"}"; ObjectNode objectNode = JSONUtils.createObjectNode(); objectNode.put("a","b"); objectNode.put("b","d"); String s = JSONUtils.toJsonString(objectNode); Assert.assertEquals(s, jsonStr); } @Test public void toMap() { String jsonStr = "{\"id\":\"1001\",\"name\":\"Jobs\"}"; Map<String, String> models = JSONUtils.toMap(jsonStr); Assert.assertEquals("1001", models.get("id")); Assert.assertEquals("Jobs", models.get("name")); } @Test public void convert2Property() { Property property = new Property(); property.setProp("ds"); property.setDirect(Direct.IN); property.setType(DataType.VARCHAR); property.setValue("sssssss"); String str = "{\"direct\":\"IN\",\"prop\":\"ds\",\"type\":\"VARCHAR\",\"value\":\"sssssss\"}"; Property property1 = JSONUtils.parseObject(str, Property.class); Direct direct = 
property1.getDirect(); Assert.assertEquals(Direct.IN, direct); } @Test public void string2MapTest() { String str = list2String(); List<LinkedHashMap> maps = JSONUtils.toList(str, LinkedHashMap.class); Assert.assertEquals(1, maps.size()); Assert.assertEquals("mysql200", maps.get(0).get("mysql service name")); Assert.assertEquals("192.168.xx.xx", maps.get(0).get("mysql address")); Assert.assertEquals("3306", maps.get(0).get("port")); Assert.assertEquals("80", maps.get(0).get("no index of number")); Assert.assertEquals("190", maps.get(0).get("database client connections")); } public String list2String() { LinkedHashMap<String, String> map1 = new LinkedHashMap<>(); map1.put("mysql service name", "mysql200"); map1.put("mysql address", "192.168.xx.xx"); map1.put("port", "3306"); map1.put("no index of number", "80"); map1.put("database client connections", "190"); List<LinkedHashMap<String, String>> maps = new ArrayList<>(); maps.add(0, map1); String resultJson = JSONUtils.toJsonString(maps); return resultJson; } @Test public void testParseObject() { Assert.assertNull(JSONUtils.parseObject("")); Assert.assertNull(JSONUtils.parseObject("foo", String.class)); } @Test public void testNodeString() { Assert.assertEquals("", JSONUtils.getNodeString("", "key")); Assert.assertEquals("", JSONUtils.getNodeString("abc", "key")); Assert.assertEquals("", JSONUtils.getNodeString("{\"bar\":\"foo\"}", "key")); Assert.assertEquals("\"foo\"", JSONUtils.getNodeString("{\"bar\":\"foo\"}", "bar")); } @Test public void testJsonByteArray() { String str = "foo"; byte[] serializeByte = JSONUtils.toJsonByteArray(str); String deserialize = JSONUtils.parseObject(serializeByte, String.class); Assert.assertEquals(str, deserialize); str = null; serializeByte = JSONUtils.toJsonByteArray(str); deserialize = JSONUtils.parseObject(serializeByte, String.class); Assert.assertNull(deserialize); } @Test public void testToList() { Assert.assertEquals(new ArrayList(), JSONUtils.toList("A1B2C3", null)); Assert.assertEquals(new ArrayList(), JSONUtils.toList("", null)); } @Test public void testCheckJsonValid() { Assert.assertTrue(JSONUtils.checkJsonValid("3")); Assert.assertFalse(JSONUtils.checkJsonValid("")); } @Test public void testFindValue() { Assert.assertNull(JSONUtils.findValue( new ArrayNode(new JsonNodeFactory(true)), null)); } @Test public void testToMap() { Map<String, String> map = new HashMap<>(); map.put("foo", "bar"); Assert.assertTrue(map.equals(JSONUtils.toMap( "{\n" + "\"foo\": \"bar\"\n" + "}"))); Assert.assertFalse(map.equals(JSONUtils.toMap( "{\n" + "\"bar\": \"foo\"\n" + "}"))); Assert.assertNull(JSONUtils.toMap("3")); Assert.assertNull(JSONUtils.toMap(null)); Assert.assertNull(JSONUtils.toMap("3", null, null)); Assert.assertNull(JSONUtils.toMap(null, null, null)); String str = "{\"resourceList\":[],\"localParams\":[],\"rawScript\":\"#!/bin/bash\\necho \\\"shell-1\\\"\"}"; Map<String, String> m = JSONUtils.toMap(str); Assert.assertNotNull(m); } @Test public void testToJsonString() { Map<String, Object> map = new HashMap<>(); map.put("foo", "bar"); Assert.assertEquals("{\"foo\":\"bar\"}", JSONUtils.toJsonString(map)); Assert.assertEquals(String.valueOf((Object) null), JSONUtils.toJsonString(null)); Assert.assertEquals("{\"foo\":\"bar\"}", JSONUtils.toJsonString(map, SerializationFeature.WRITE_NULL_MAP_VALUES)); } @Test public void parseObject() { String str = "{\"color\":\"yellow\",\"type\":\"renault\"}"; ObjectNode node = JSONUtils.parseObject(str); Assert.assertEquals("yellow", node.path("color").asText()); 
node.put("price", 100); Assert.assertEquals(100, node.path("price").asInt()); node.put("color", "red"); Assert.assertEquals("red", node.path("color").asText()); } @Test public void parseArray() { String str = "[{\"color\":\"yellow\",\"type\":\"renault\"}]"; ArrayNode node = JSONUtils.parseArray(str); Assert.assertEquals("yellow", node.path(0).path("color").asText()); } @Test public void jsonDataDeserializerTest() { String a = "{\"conditionResult\":\"{\\\"successNode\\\":[\\\"\\\"],\\\"failedNode\\\":[\\\"\\\"]}\"," + "\"conditionsTask\":false,\"depList\":[],\"dependence\":\"{}\",\"forbidden\":false," + "\"id\":\"tasks-86823\",\"maxRetryTimes\":1,\"name\":\"shell test\"," + "\"params\":\"{\\\"resourceList\\\":[],\\\"localParams\\\":[],\\\"rawScript\\\":\\\"echo " + "'yyc'\\\"}\",\"preTasks\":\"[]\",\"retryInterval\":1,\"runFlag\":\"NORMAL\"," + "\"taskInstancePriority\":\"HIGHEST\",\"taskTimeoutParameter\":{\"enable\":false,\"interval\":0}," + "\"timeout\":\"{}\",\"type\":\"SHELL\",\"workerGroup\":\"default\"}"; TaskNode taskNode = JSONUtils.parseObject(a, TaskNode.class); Assert.assertTrue(true); } }
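
Given the JUnit 4 style of JSONUtilsTest above, a regression test pinning down the reported behavior might look like the following sketch (a hypothetical test mirroring the reporter's snippet; it is not the test added by the actual fix):

```java
import java.util.List;
import java.util.Map;

import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.junit.Assert;
import org.junit.Test;

// Hypothetical regression test documenting the erasure bug in toMap.
public class JsonUtilsToMapErasureTest {

    @Test
    public void toMapSilentlyIgnoresDeclaredValueType() {
        String json = "{\"id\":[],\"test1\":\"6\"}";
        Map<String, List> param = JSONUtils.toMap(json, String.class, List.class);
        try {
            // the implicit checkcast to List inside forEach blows up,
            // because the second value is really a String
            param.forEach((key, value) -> { });
            Assert.fail("expected a ClassCastException while the erasure bug is present");
        } catch (ClassCastException expected) {
            // documents the broken behavior; a fixed toMap should instead
            // reject "6" at parse time or convert it to the declared type
        }
    }
}
```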
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,072
[Bug][common] Generic types are invalid in JSONUtils.toMap
**Describe the bug** This method is meant to deserialize a JSON string into a Map. https://github.com/apache/dolphinscheduler/blob/e866d1be86464d812551e8c38ba60767a204c82e/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java#L249-L251 However, the generic types of the resulting Map are not what we expect. We can write a simple test case: ```java String sqlResult = "{\"id\":[],\"test1\":\"6\"}"; Map<String, List> param = toMap(sqlResult, String.class, List.class); System.out.println(param); ``` It executes successfully and prints ``` {id=[], test1=6} ``` The value of `id` is a `List`, while the value of `test1` is a `String`. But if we iterate over the map, it throws an exception, because the values in this map have different runtime types: ```java String sqlResult = "{\"id\":[],\"test1\":\"6\"}"; Map<String, List> param = toMap(sqlResult, String.class, List.class); param.forEach((key, value) -> System.out.println(value)); ``` ``` [] Exception in thread "main" java.lang.ClassCastException: java.lang.String cannot be cast to java.util.List at java.util.LinkedHashMap.forEach(LinkedHashMap.java:684) at org.apache.dolphinscheduler.common.utils.JSONUtils.main(JSONUtils.java:256) ``` I have not tracked down the root cause, but I think we should remove this method. **Which version of Dolphin Scheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/6072
https://github.com/apache/dolphinscheduler/pull/6283
c855a5926ceb9cfb6efb0c901c8d92fe3774e999
46257834792d3316c9f5ef885f38e069e4eb6452
"2021-08-31T10:30:15Z"
java
"2021-09-23T15:06:53Z"
dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/TaskInstance.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.dao.entity; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.task.dependent.DependentParameters; import org.apache.dolphinscheduler.common.task.switchtask.SwitchParameters; import org.apache.dolphinscheduler.common.utils.JSONUtils; import java.io.Serializable; import java.util.Date; import java.util.Map; import com.baomidou.mybatisplus.annotation.IdType; import com.baomidou.mybatisplus.annotation.TableField; import com.baomidou.mybatisplus.annotation.TableId; import com.baomidou.mybatisplus.annotation.TableName; import com.fasterxml.jackson.annotation.JsonFormat; /** * task instance */ @TableName("t_ds_task_instance") public class TaskInstance implements Serializable { /** * id */ @TableId(value = "id", type = IdType.AUTO) private int id; /** * task name */ private String name; /** * task type */ private String taskType; /** * process instance id */ private int processInstanceId; /** * task code */ private long taskCode; /** * task definition version */ private int taskDefinitionVersion; /** * process instance name */ @TableField(exist = false) private String processInstanceName; /** * state */ private ExecutionStatus state; /** * task first submit time. 
*/ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date firstSubmitTime; /** * task submit time */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date submitTime; /** * task start time */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date startTime; /** * task end time */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date endTime; /** * task host */ private String host; /** * task shell execute path and the resource down from hdfs * default path: $base_run_dir/processInstanceId/taskInstanceId/retryTimes */ private String executePath; /** * task log path * default path: $base_run_dir/processInstanceId/taskInstanceId/retryTimes */ private String logPath; /** * retry times */ private int retryTimes; /** * alert flag */ private Flag alertFlag; /** * process instance */ @TableField(exist = false) private ProcessInstance processInstance; /** * process definition */ @TableField(exist = false) private ProcessDefinition processDefine; /** * task definition */ @TableField(exist = false) private TaskDefinition taskDefine; /** * process id */ private int pid; /** * appLink */ private String appLink; /** * flag */ private Flag flag; /** * dependency */ @TableField(exist = false) private DependentParameters dependency; /** * switch dependency */ @TableField(exist = false) private SwitchParameters switchDependency; /** * duration */ @TableField(exist = false) private String duration; /** * max retry times */ private int maxRetryTimes; /** * task retry interval, unit: minute */ private int retryInterval; /** * task intance priority */ private Priority taskInstancePriority; /** * process intance priority */ @TableField(exist = false) private Priority processInstancePriority; /** * dependent state */ @TableField(exist = false) private String dependentResult; /** * workerGroup */ private String workerGroup; /** * environment code */ private Long environmentCode; /** * environment config */ private String environmentConfig; /** * executor id */ private int executorId; /** * varPool string */ private String varPool; /** * executor name */ @TableField(exist = false) private String executorName; @TableField(exist = false) private Map<String, String> resources; /** * delay execution time. 
*/ private int delayTime; /** * task params */ private String taskParams; public void init(String host, Date startTime, String executePath) { this.host = host; this.startTime = startTime; this.executePath = executePath; } public String getVarPool() { return varPool; } public void setVarPool(String varPool) { this.varPool = varPool; } public ProcessInstance getProcessInstance() { return processInstance; } public void setProcessInstance(ProcessInstance processInstance) { this.processInstance = processInstance; } public ProcessDefinition getProcessDefine() { return processDefine; } public void setProcessDefine(ProcessDefinition processDefine) { this.processDefine = processDefine; } public TaskDefinition getTaskDefine() { return taskDefine; } public void setTaskDefine(TaskDefinition taskDefine) { this.taskDefine = taskDefine; } public int getId() { return id; } public void setId(int id) { this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getTaskType() { return taskType; } public void setTaskType(String taskType) { this.taskType = taskType; } public int getProcessInstanceId() { return processInstanceId; } public void setProcessInstanceId(int processInstanceId) { this.processInstanceId = processInstanceId; } public ExecutionStatus getState() { return state; } public void setState(ExecutionStatus state) { this.state = state; } public Date getFirstSubmitTime() { return firstSubmitTime; } public void setFirstSubmitTime(Date firstSubmitTime) { this.firstSubmitTime = firstSubmitTime; } public Date getSubmitTime() { return submitTime; } public void setSubmitTime(Date submitTime) { this.submitTime = submitTime; } public Date getStartTime() { return startTime; } public void setStartTime(Date startTime) { this.startTime = startTime; } public Date getEndTime() { return endTime; } public void setEndTime(Date endTime) { this.endTime = endTime; } public String getHost() { return host; } public void setHost(String host) { this.host = host; } public String getExecutePath() { return executePath; } public void setExecutePath(String executePath) { this.executePath = executePath; } public String getLogPath() { return logPath; } public void setLogPath(String logPath) { this.logPath = logPath; } public Flag getAlertFlag() { return alertFlag; } public void setAlertFlag(Flag alertFlag) { this.alertFlag = alertFlag; } public int getRetryTimes() { return retryTimes; } public void setRetryTimes(int retryTimes) { this.retryTimes = retryTimes; } public Boolean isTaskSuccess() { return this.state == ExecutionStatus.SUCCESS; } public int getPid() { return pid; } public void setPid(int pid) { this.pid = pid; } public String getAppLink() { return appLink; } public void setAppLink(String appLink) { this.appLink = appLink; } public Long getEnvironmentCode() { return this.environmentCode; } public void setEnvironmentCode(Long environmentCode) { this.environmentCode = environmentCode; } public String getEnvironmentConfig() { return this.environmentConfig; } public void setEnvironmentConfig(String environmentConfig) { this.environmentConfig = environmentConfig; } public DependentParameters getDependency() { if (this.dependency == null) { Map<String, Object> taskParamsMap = JSONUtils.toMap(this.getTaskParams(), String.class, Object.class); this.dependency = JSONUtils.parseObject((String) taskParamsMap.get(Constants.DEPENDENCE), DependentParameters.class); } return this.dependency; } public void setDependency(DependentParameters dependency) { this.dependency 
= dependency; } public SwitchParameters getSwitchDependency() { if (this.switchDependency == null) { Map<String, Object> taskParamsMap = JSONUtils.toMap(this.getTaskParams(), String.class, Object.class); this.switchDependency = JSONUtils.parseObject((String) taskParamsMap.get(Constants.SWITCH_RESULT), SwitchParameters.class); } return this.switchDependency; } public void setSwitchDependency(SwitchParameters switchDependency) { Map<String, Object> taskParamsMap = JSONUtils.toMap(this.getTaskParams(), String.class, Object.class); taskParamsMap.put(Constants.SWITCH_RESULT,JSONUtils.toJsonString(switchDependency)); this.setTaskParams(JSONUtils.toJsonString(taskParamsMap)); } public Flag getFlag() { return flag; } public void setFlag(Flag flag) { this.flag = flag; } public String getProcessInstanceName() { return processInstanceName; } public void setProcessInstanceName(String processInstanceName) { this.processInstanceName = processInstanceName; } public String getDuration() { return duration; } public void setDuration(String duration) { this.duration = duration; } public int getMaxRetryTimes() { return maxRetryTimes; } public void setMaxRetryTimes(int maxRetryTimes) { this.maxRetryTimes = maxRetryTimes; } public int getRetryInterval() { return retryInterval; } public void setRetryInterval(int retryInterval) { this.retryInterval = retryInterval; } public int getExecutorId() { return executorId; } public void setExecutorId(int executorId) { this.executorId = executorId; } public String getExecutorName() { return executorName; } public void setExecutorName(String executorName) { this.executorName = executorName; } public boolean isTaskComplete() { return this.getState().typeIsPause() || this.getState().typeIsSuccess() || this.getState().typeIsCancel() || (this.getState().typeIsFailure() && !taskCanRetry()); } public Map<String, String> getResources() { return resources; } public void setResources(Map<String, String> resources) { this.resources = resources; } public boolean isSubProcess() { return TaskType.SUB_PROCESS.getDesc().equalsIgnoreCase(this.taskType); } public boolean isDependTask() { return TaskType.DEPENDENT.getDesc().equalsIgnoreCase(this.taskType); } public boolean isConditionsTask() { return TaskType.CONDITIONS.getDesc().equalsIgnoreCase(this.taskType); } public boolean isSwitchTask() { return TaskType.SWITCH.getDesc().equalsIgnoreCase(this.taskType); } /** * determine if you can try again * * @return can try result */ public boolean taskCanRetry() { if (this.isSubProcess()) { return false; } if (this.getState() == ExecutionStatus.NEED_FAULT_TOLERANCE) { return true; } else { return (this.getState().typeIsFailure() && this.getRetryTimes() < this.getMaxRetryTimes()); } } public Priority getTaskInstancePriority() { return taskInstancePriority; } public void setTaskInstancePriority(Priority taskInstancePriority) { this.taskInstancePriority = taskInstancePriority; } public Priority getProcessInstancePriority() { return processInstancePriority; } public void setProcessInstancePriority(Priority processInstancePriority) { this.processInstancePriority = processInstancePriority; } public String getWorkerGroup() { return workerGroup; } public void setWorkerGroup(String workerGroup) { this.workerGroup = workerGroup; } public String getDependentResult() { return dependentResult; } public void setDependentResult(String dependentResult) { this.dependentResult = dependentResult; } public int getDelayTime() { return delayTime; } public void setDelayTime(int delayTime) { this.delayTime = delayTime; 
} @Override public String toString() { return "TaskInstance{" + "id=" + id + ", name='" + name + '\'' + ", taskType='" + taskType + '\'' + ", processInstanceId=" + processInstanceId + ", processInstanceName='" + processInstanceName + '\'' + ", state=" + state + ", firstSubmitTime=" + firstSubmitTime + ", submitTime=" + submitTime + ", startTime=" + startTime + ", endTime=" + endTime + ", host='" + host + '\'' + ", executePath='" + executePath + '\'' + ", logPath='" + logPath + '\'' + ", retryTimes=" + retryTimes + ", alertFlag=" + alertFlag + ", processInstance=" + processInstance + ", processDefine=" + processDefine + ", pid=" + pid + ", appLink='" + appLink + '\'' + ", flag=" + flag + ", dependency='" + dependency + '\'' + ", duration=" + duration + ", maxRetryTimes=" + maxRetryTimes + ", retryInterval=" + retryInterval + ", taskInstancePriority=" + taskInstancePriority + ", processInstancePriority=" + processInstancePriority + ", dependentResult='" + dependentResult + '\'' + ", workerGroup='" + workerGroup + '\'' + ", environmentCode=" + environmentCode + ", environmentConfig='" + environmentConfig + '\'' + ", executorId=" + executorId + ", executorName='" + executorName + '\'' + ", delayTime=" + delayTime + '}'; } public long getTaskCode() { return taskCode; } public void setTaskCode(long taskCode) { this.taskCode = taskCode; } public int getTaskDefinitionVersion() { return taskDefinitionVersion; } public void setTaskDefinitionVersion(int taskDefinitionVersion) { this.taskDefinitionVersion = taskDefinitionVersion; } public String getTaskParams() { return taskParams; } public void setTaskParams(String taskParams) { this.taskParams = taskParams; } public boolean isFirstRun() { return endTime == null; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,072
[Bug][common] Generic types are not enforced in JSONUtils.toMap
**Describe the bug**
This method is intended to deserialize a JSON string into a `Map`:
https://github.com/apache/dolphinscheduler/blob/e866d1be86464d812551e8c38ba60767a204c82e/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/JSONUtils.java#L249-L251
However, the generic types of the resulting `Map` are not what the signature promises. A simple test case:
```java
String sqlResult = "{\"id\":[],\"test1\":\"6\"}";
Map<String, List> param = toMap(sqlResult, String.class, List.class);
System.out.println(param);
```
It executes successfully and prints
```
{id=[], test1=6}
```
The actual value type of `id` is `List`, while the actual value type of `test1` is `String`. If we then iterate over the map, it throws an exception, because the values in the map have different runtime types:
```java
String sqlResult = "{\"id\":[],\"test1\":\"6\"}";
Map<String, List> param = toMap(sqlResult, String.class, List.class);
param.forEach((key, value) -> System.out.println(value));
```
```
[]
Exception in thread "main" java.lang.ClassCastException: java.lang.String cannot be cast to java.util.List
	at java.util.LinkedHashMap.forEach(LinkedHashMap.java:684)
	at org.apache.dolphinscheduler.common.utils.JSONUtils.main(JSONUtils.java:256)
```
I have not tracked down the root cause, but I think we should remove this method.
**Which version of Dolphin Scheduler:**
-[dev]
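A plausible explanation for the reported behavior is that the `keyClazz`/`valueClazz` arguments never reach Jackson's binder: if the implementation deserializes through a raw or type-variable-based `Map` target, Jackson falls back to its natural JSON bindings (`String` for `"6"`, `ArrayList` for `[]`), so the declared value type is silently ignored and the `ClassCastException` only surfaces at first use. The sketch below is not the actual fix from the linked PR; it is a minimal, hedged illustration of how an explicit `MapType` makes Jackson enforce the value type and fail fast instead. The class name `TypedToMapSketch` and method name `toTypedMap` are hypothetical.
```java
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.type.MapType;

public class TypedToMapSketch {

    private static final ObjectMapper MAPPER = new ObjectMapper();

    /**
     * Sketch of a type-safe alternative to toMap(json, keyClazz, valueClazz).
     * Constructing an explicit MapType forces Jackson to bind every value to
     * valueClazz, so a mismatched value fails at parse time instead of leaking
     * a raw value that throws ClassCastException later.
     */
    public static <K, V> Map<K, V> toTypedMap(String json, Class<K> keyClazz, Class<V> valueClazz) {
        try {
            MapType mapType = MAPPER.getTypeFactory()
                    .constructMapType(Map.class, keyClazz, valueClazz);
            return MAPPER.readValue(json, mapType);
        } catch (Exception e) {
            // Mirrors the lenient contract of the original utility: return
            // null on any parse failure rather than propagating the exception.
            return null;
        }
    }

    public static void main(String[] args) {
        // {"id":[]} binds cleanly to List, but "6" cannot be bound to List,
        // so readValue throws and toTypedMap returns null up front instead of
        // handing back a heterogeneous map.
        String sqlResult = "{\"id\":[],\"test1\":\"6\"}";
        Map<String, List> param = toTypedMap(sqlResult, String.class, List.class);
        System.out.println(param); // prints: null
    }
}
```
With this shape, callers either get a map whose values really are of the declared type or an immediate failure, which is easier to reason about than a map that passes `println` but explodes inside `forEach`.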
https://github.com/apache/dolphinscheduler/issues/6072
https://github.com/apache/dolphinscheduler/pull/6283
c855a5926ceb9cfb6efb0c901c8d92fe3774e999
46257834792d3316c9f5ef885f38e069e4eb6452
"2021-08-31T10:30:15Z"
java
"2021-09-23T15:06:53Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID; import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS; import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS; import static java.util.stream.Collectors.toSet; import com.fasterxml.jackson.core.type.TypeReference; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.DateInterval; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.process.ResourceInfo; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import 
org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.common.utils.TaskParametersUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ErrorCommand; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper; import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.service.log.LogClientService; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; import 
com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.facebook.presto.jdbc.internal.guava.collect.Lists; import com.fasterxml.jackson.databind.node.ObjectNode; /** * process relative dao that some mappers in this. */ @Component public class ProcessService { private final Logger logger = LoggerFactory.getLogger(getClass()); private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal()}; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionMapper processDefineMapper; @Autowired private ProcessDefinitionLogMapper processDefineLogMapper; @Autowired private ProcessInstanceMapper processInstanceMapper; @Autowired private DataSourceMapper dataSourceMapper; @Autowired private ProcessInstanceMapMapper processInstanceMapMapper; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private CommandMapper commandMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private UdfFuncMapper udfFuncMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ErrorCommandMapper errorCommandMapper; @Autowired private TenantMapper tenantMapper; @Autowired private ProjectMapper projectMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired private EnvironmentMapper environmentMapper; /** * handle Command (construct ProcessInstance from Command) , wrapped in transaction * * @param logger logger * @param host host * @param validThreadNum validThreadNum * @param command found command * @return process instance */ @Transactional(rollbackFor = Exception.class) public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) { ProcessInstance processInstance = constructProcessInstance(command, host); // cannot construct process instance, return null if (processInstance == null) { logger.error("scan command, command parameter is error: {}", command); moveToErrorCommand(command, "process instance is null"); return null; } if (!checkThreadNum(command, validThreadNum)) { logger.info("there is not enough thread for this command: {}", command); return setWaitingThreadProcess(command, processInstance); } processInstance.setCommandType(command.getCommandType()); processInstance.addHistoryCmd(command.getCommandType()); saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); this.commandMapper.deleteById(command.getId()); return processInstance; } /** * save error command, and delete original command * * @param command command * @param message message */ @Transactional(rollbackFor = Exception.class) public void moveToErrorCommand(Command command, String message) { ErrorCommand errorCommand = new ErrorCommand(command, message); this.errorCommandMapper.insert(errorCommand); this.commandMapper.deleteById(command.getId()); } /** * set process waiting thread * * @param command command * @param processInstance processInstance * @return process instance */ private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) { 
processInstance.setState(ExecutionStatus.WAITING_THREAD); if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) { processInstance.addHistoryCmd(command.getCommandType()); } saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); createRecoveryWaitingThreadCommand(command, processInstance); return null; } /** * check thread num * * @param command command * @param validThreadNum validThreadNum * @return if thread is enough */ private boolean checkThreadNum(Command command, int validThreadNum) { int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode()); return validThreadNum >= commandThreadCount; } /** * insert one command * * @param command command * @return create result */ public int createCommand(Command command) { int result = 0; if (command != null) { result = commandMapper.insert(command); } return result; } /** * find one command from queue list * * @return command */ public Command findOneCommand() { return commandMapper.getOneToRun(); } /** * get command page * * @param pageSize * @param pageNumber * @return */ public List<Command> findCommandPage(int pageSize, int pageNumber) { Page<Command> commandPage = new Page<>(pageNumber, pageSize); return commandMapper.queryCommandPage(commandPage).getRecords(); } /** * check the input command exists in queue list * * @param command command * @return create command result */ public boolean verifyIsNeedCreateCommand(Command command) { boolean isNeedCreate = true; EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class); cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1); cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1); cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1); CommandType commandType = command.getCommandType(); if (cmdTypeMap.containsKey(commandType)) { ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam()); int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt(); List<Command> commands = commandMapper.selectList(null); // for all commands for (Command tmpCommand : commands) { if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) { ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam()); if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) { isNeedCreate = false; break; } } } } return isNeedCreate; } /** * find process instance detail by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceDetailById(int processId) { return processInstanceMapper.queryDetailById(processId); } /** * get task node list by definitionId */ public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) { ProcessDefinition processDefinition = processDefineMapper.selectById(defineId); if (processDefinition == null) { logger.error("process define not exists"); return new ArrayList<>(); } List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) { if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); return 
new ArrayList<>(taskDefinitionLogs); } /** * find process instance by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceById(int processId) { return processInstanceMapper.selectById(processId); } /** * find process define by id. * * @param processDefinitionId processDefinitionId * @return process definition */ public ProcessDefinition findProcessDefineById(int processDefinitionId) { return processDefineMapper.selectById(processDefinitionId); } /** * find process define by code and version. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); if (processDefinition == null || processDefinition.getVersion() != version) { processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version); if (processDefinition != null) { processDefinition.setId(0); } } return processDefinition; } /** * find process define by code. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) { return processDefineMapper.queryByCode(processDefinitionCode); } /** * delete work process instance by id * * @param processInstanceId processInstanceId * @return delete process instance result */ public int deleteWorkProcessInstanceById(int processInstanceId) { return processInstanceMapper.deleteById(processInstanceId); } /** * delete all sub process by parent instance id * * @param processInstanceId processInstanceId * @return delete all sub process instance result */ public int deleteAllSubWorkProcessByParentId(int processInstanceId) { List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId); for (Integer subId : subProcessIdList) { deleteAllSubWorkProcessByParentId(subId); deleteWorkProcessMapByParentId(subId); removeTaskLogFile(subId); deleteWorkProcessInstanceById(subId); } return 1; } /** * remove task log file * * @param processInstanceId processInstanceId */ public void removeTaskLogFile(Integer processInstanceId) { List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId); if (CollectionUtils.isEmpty(taskInstanceList)) { return; } try (LogClientService logClient = new LogClientService()) { for (TaskInstance taskInstance : taskInstanceList) { String taskLogPath = taskInstance.getLogPath(); if (StringUtils.isEmpty(taskInstance.getHost())) { continue; } int port = Constants.RPC_PORT; String ip = ""; try { ip = Host.of(taskInstance.getHost()).getIp(); } catch (Exception e) { // compatible old version ip = taskInstance.getHost(); } // remove task log from loggerserver logClient.removeTaskLog(ip, port, taskLogPath); } } } /** * calculate sub process number in the process define. * * @param processDefinitionCode processDefinitionCode * @return process thread num count */ private Integer workProcessThreadNumCount(long processDefinitionCode) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); List<Integer> ids = new ArrayList<>(); recurseFindSubProcessId(processDefinition.getId(), ids); return ids.size() + 1; } /** * recursive query sub process definition id by parent id. 
* * @param parentId parentId * @param ids ids */ public void recurseFindSubProcessId(int parentId, List<Integer> ids) { List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId); if (taskNodeList != null && !taskNodeList.isEmpty()) { for (TaskDefinition taskNode : taskNodeList) { String parameter = taskNode.getTaskParams(); ObjectNode parameterJson = JSONUtils.parseObject(parameter); if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) { SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class); ids.add(subProcessParam.getProcessDefinitionId()); recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids); } } } } /** * create recovery waiting thread command when thread pool is not enough for the process instance. * sub work process instance need not to create recovery command. * create recovery waiting thread command and delete origin command at the same time. * if the recovery command is exists, only update the field update_time * * @param originCommand originCommand * @param processInstance processInstance */ public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) { // sub process doesnot need to create wait command if (processInstance.getIsSubProcess() == Flag.YES) { if (originCommand != null) { commandMapper.deleteById(originCommand.getId()); } return; } Map<String, String> cmdParam = new HashMap<>(); cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId())); // process instance quit by "waiting thread" state if (originCommand == null) { Command command = new Command( CommandType.RECOVER_WAITING_THREAD, processInstance.getTaskDependType(), processInstance.getFailureStrategy(), processInstance.getExecutorId(), processInstance.getProcessDefinition().getCode(), JSONUtils.toJsonString(cmdParam), processInstance.getWarningType(), processInstance.getWarningGroupId(), processInstance.getScheduleTime(), processInstance.getWorkerGroup(), processInstance.getEnvironmentCode(), processInstance.getProcessInstancePriority() ); saveCommand(command); return; } // update the command time if current command if recover from waiting if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) { originCommand.setUpdateTime(new Date()); saveCommand(originCommand); } else { // delete old command and create new waiting thread command commandMapper.deleteById(originCommand.getId()); originCommand.setId(0); originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD); originCommand.setUpdateTime(new Date()); originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam)); originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority()); saveCommand(originCommand); } } /** * get schedule time from command * * @param command command * @param cmdParam cmdParam map * @return date */ private Date getScheduleTime(Command command, Map<String, String> cmdParam) { Date scheduleTime = command.getScheduleTime(); if (scheduleTime == null && cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) { scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); } return scheduleTime; } /** * generate a new work process instance from command. 
* * @param processDefinition processDefinition * @param command command * @param cmdParam cmdParam map * @return process instance */ private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition, Command command, Map<String, String> cmdParam) { ProcessInstance processInstance = new ProcessInstance(processDefinition); processInstance.setProcessDefinitionCode(processDefinition.getCode()); processInstance.setProcessDefinitionVersion(processDefinition.getVersion()); processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setRecovery(Flag.NO); processInstance.setStartTime(new Date()); processInstance.setRunTimes(1); processInstance.setMaxTryTimes(0); //processInstance.setProcessDefinitionId(command.getProcessDefinitionId()); processInstance.setCommandParam(command.getCommandParam()); processInstance.setCommandType(command.getCommandType()); processInstance.setIsSubProcess(Flag.NO); processInstance.setTaskDependType(command.getTaskDependType()); processInstance.setFailureStrategy(command.getFailureStrategy()); processInstance.setExecutorId(command.getExecutorId()); WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType(); processInstance.setWarningType(warningType); Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId(); processInstance.setWarningGroupId(warningGroupId); // schedule time Date scheduleTime = getScheduleTime(command, cmdParam); if (scheduleTime != null) { processInstance.setScheduleTime(scheduleTime); } processInstance.setCommandStartTime(command.getStartTime()); processInstance.setLocations(processDefinition.getLocations()); // reset global params while there are start parameters setGlobalParamIfCommanded(processDefinition, cmdParam); // curing global params processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), getCommandTypeIfComplement(processInstance, command), processInstance.getScheduleTime())); // set process instance priority processInstance.setProcessInstancePriority(command.getProcessInstancePriority()); String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup(); processInstance.setWorkerGroup(workerGroup); processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? 
-1 : command.getEnvironmentCode()); processInstance.setTimeout(processDefinition.getTimeout()); processInstance.setTenantId(processDefinition.getTenantId()); return processInstance; } private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) { // get start params from command param Map<String, String> startParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) { String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS); startParamMap = JSONUtils.toMap(startParamJson); } Map<String, String> fatherParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) { String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS); fatherParamMap = JSONUtils.toMap(fatherParamJson); } startParamMap.putAll(fatherParamMap); // set start param into global params if (startParamMap.size() > 0 && processDefinition.getGlobalParamMap() != null) { for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) { String val = startParamMap.get(param.getKey()); if (val != null) { param.setValue(val); } } } } /** * get process tenant * there is tenant id in definition, use the tenant of the definition. * if there is not tenant id in the definiton or the tenant not exist * use definition creator's tenant. * * @param tenantId tenantId * @param userId userId * @return tenant */ public Tenant getTenantForProcess(int tenantId, int userId) { Tenant tenant = null; if (tenantId >= 0) { tenant = tenantMapper.queryById(tenantId); } if (userId == 0) { return null; } if (tenant == null) { User user = userMapper.selectById(userId); tenant = tenantMapper.queryById(user.getTenantId()); } return tenant; } /** * get an environment * use the code of the environment to find a environment. * * @param environmentCode environmentCode * @return Environment */ public Environment findEnvironmentByCode(Long environmentCode) { Environment environment = null; if (environmentCode >= 0) { environment = environmentMapper.queryByEnvironmentCode(environmentCode); } return environment; } /** * check command parameters is valid * * @param command command * @param cmdParam cmdParam map * @return whether command param is valid */ private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) { if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) { if (cmdParam == null || !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES) || cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) { logger.error("command node depend type is {}, but start nodes is null ", command.getTaskDependType()); return false; } } return true; } /** * construct process instance according to one command. * * @param command command * @param host host * @return process instance */ private ProcessInstance constructProcessInstance(Command command, String host) { ProcessInstance processInstance; CommandType commandType = command.getCommandType(); Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam()); ProcessDefinition processDefinition = getProcessDefinitionByCommand(command.getProcessDefinitionCode(), cmdParam); if (processDefinition == null) { logger.error("cannot find the work process define! 
define code : {}", command.getProcessDefinitionCode()); return null; } if (cmdParam != null) { int processInstanceId = 0; // recover from failure or pause tasks if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING); processInstanceId = Integer.parseInt(processId); if (processInstanceId == 0) { logger.error("command parameter is error, [ ProcessInstanceId ] is 0"); return null; } } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { // sub process map String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS); processInstanceId = Integer.parseInt(pId); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { // waiting thread command String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD); processInstanceId = Integer.parseInt(pId); } if (processInstanceId == 0) { processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } else { processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return processInstance; } CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command); // reset global params while repeat running is needed by cmdParam if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) { setGlobalParamIfCommanded(processDefinition, cmdParam); } // Recalculate global parameters after rerun. processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), commandTypeIfComplement, processInstance.getScheduleTime())); processInstance.setProcessDefinition(processDefinition); } //reset command parameter if (processInstance.getCommandParam() != null) { Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam()); for (Map.Entry<String, String> entry : processCmdParam.entrySet()) { if (!cmdParam.containsKey(entry.getKey())) { cmdParam.put(entry.getKey(), entry.getValue()); } } } // reset command parameter if sub process if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstance.setCommandParam(command.getCommandParam()); } } else { // generate one new process instance processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) { logger.error("command parameter check failed!"); return null; } if (command.getScheduleTime() != null) { processInstance.setScheduleTime(command.getScheduleTime()); } processInstance.setHost(host); ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION; int runTime = processInstance.getRunTimes(); switch (commandType) { case START_PROCESS: break; case START_FAILURE_TASK_PROCESS: // find failed tasks and init these tasks List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE); List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE); List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); failedList.addAll(killedList); failedList.addAll(toleranceList); for (Integer taskId : failedList) { initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(Constants.COMMA, convertIntListToString(failedList))); 
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case START_CURRENT_TASK_PROCESS: break; case RECOVER_WAITING_THREAD: break; case RECOVER_SUSPENDED_PROCESS: // find pause tasks and init task's state cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE); List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); suspendedNodeList.addAll(stopNodeList); for (Integer taskId : suspendedNodeList) { // initialize the pause state initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList))); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case RECOVER_TOLERANCE_FAULT_PROCESS: // recover tolerance fault process processInstance.setRecovery(Flag.YES); runStatus = processInstance.getState(); break; case COMPLEMENT_DATA: // delete all the valid tasks when complement data List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { taskInstance.setFlag(Flag.NO); this.updateTaskInstance(taskInstance); } initComplementDataParam(processDefinition, processInstance, cmdParam); break; case REPEAT_RUNNING: // delete the recover task names from command parameter if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } // delete all the valid tasks when repeat running List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : validTaskList) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); } processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processInstance.setRunTimes(runTime + 1); initComplementDataParam(processDefinition, processInstance, cmdParam); break; case SCHEDULER: break; default: break; } processInstance.setState(runStatus); return processInstance; } /** * get process definition by command * If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance * Otherwise, get the latest version of ProcessDefinition * * @return ProcessDefinition */ private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) { if (cmdParam != null) { int processInstanceId = 0; if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)); } if (processInstanceId != 0) { ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return null; } return processDefineLogMapper.queryByDefinitionCodeAndVersion( processInstance.getProcessDefinitionCode(), 
processInstance.getProcessDefinitionVersion()); } } return processDefineMapper.queryByCode(processDefinitionCode); } /** * return complement data if the process start with complement data * * @param processInstance processInstance * @param command command * @return command type */ private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) { if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) { return CommandType.COMPLEMENT_DATA; } else { return command.getCommandType(); } } /** * initialize complement data parameters * * @param processDefinition processDefinition * @param processInstance processInstance * @param cmdParam cmdParam */ private void initComplementDataParam(ProcessDefinition processDefinition, ProcessInstance processInstance, Map<String, String> cmdParam) { if (!processInstance.isComplementData()) { return; } Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE), YYYY_MM_DD_HH_MM_SS); if (Flag.NO == processInstance.getIsSubProcess()) { processInstance.setScheduleTime(startComplementTime); } processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); } /** * set sub work process parameters. * handle sub work process instance, update relation table and command parameters * set sub work process flag, extends parent work process command parameters * * @param subProcessInstance subProcessInstance */ public void setSubProcessParam(ProcessInstance subProcessInstance) { String cmdParam = subProcessInstance.getCommandParam(); if (StringUtils.isEmpty(cmdParam)) { return; } Map<String, String> paramMap = JSONUtils.toMap(cmdParam); // write sub process id into cmd param. if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS) && CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) { paramMap.remove(CMD_PARAM_SUB_PROCESS); paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId())); subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap)); subProcessInstance.setIsSubProcess(Flag.YES); this.saveProcessInstance(subProcessInstance); } // copy parent instance user def params to sub process.. String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID); if (StringUtils.isNotEmpty(parentInstanceId)) { ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId)); if (parentInstance != null) { subProcessInstance.setGlobalParams( joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams())); this.saveProcessInstance(subProcessInstance); } else { logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam); } } ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class); if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) { return; } // update sub process id to process map table processInstanceMap.setProcessInstanceId(subProcessInstance.getId()); this.updateWorkProcessInstanceMap(processInstanceMap); } /** * join parent global params into sub process. * only the keys doesn't in sub process global would be joined. 
* * @param parentGlobalParams parentGlobalParams * @param subGlobalParams subGlobalParams * @return global params join */ private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) { List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class); List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class); Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); for (Property parent : parentPropertyList) { if (!subMap.containsKey(parent.getProp())) { subPropertyList.add(parent); } } return JSONUtils.toJsonString(subPropertyList); } /** * initialize task instance * * @param taskInstance taskInstance */ private void initTaskInstance(TaskInstance taskInstance) { if (!taskInstance.isSubProcess() && (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); return; } taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); updateTaskInstance(taskInstance); } /** * retry submit task to db * * @param taskInstance * @param commitRetryTimes * @param commitInterval * @return */ public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) { int retryTimes = 1; boolean submitDB = false; TaskInstance task = null; while (retryTimes <= commitRetryTimes) { try { if (!submitDB) { // submit task to db task = submitTask(taskInstance); if (task != null && task.getId() != 0) { submitDB = true; break; } } if (!submitDB) { logger.error("task commit to db failed , taskId {} has already retry {} times, please check the database", taskInstance.getId(), retryTimes); } Thread.sleep(commitInterval); } catch (Exception e) { logger.error("task commit to mysql failed", e); } retryTimes += 1; } return task; } /** * submit task to db * submit sub process to command * * @param taskInstance taskInstance * @return task instance */ @Transactional(rollbackFor = Exception.class) public TaskInstance submitTask(TaskInstance taskInstance) { ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId()); logger.info("start submit task : {}, instance id:{}, state: {}", taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState()); //submit to db TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance); if (task == null) { logger.error("end submit task to db error, task name:{}, process id:{} state: {} ", taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState()); return task; } if (!task.getState().typeIsFinished()) { createSubWorkProcess(processInstance, task); } logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ", taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState()); return task; } /** * set work process instance map * consider o * repeat running does not generate new sub process instance * set map {parent instance id, task instance id, 0(child instance id)} * * @param parentInstance parentInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) { ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId()); if (processMap != null) { return processMap; } if 
(parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) { // update current task id to map processMap = findPreviousTaskProcessMap(parentInstance, parentTask); if (processMap != null) { processMap.setParentTaskInstanceId(parentTask.getId()); updateWorkProcessInstanceMap(processMap); return processMap; } } // new task processMap = new ProcessInstanceMap(); processMap.setParentProcessInstanceId(parentInstance.getId()); processMap.setParentTaskInstanceId(parentTask.getId()); createWorkProcessInstanceMap(processMap); return processMap; } /** * find previous task work process map. * * @param parentProcessInstance parentProcessInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance, TaskInstance parentTask) { Integer preTaskId = 0; List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId()); for (TaskInstance task : preTaskList) { if (task.getName().equals(parentTask.getName())) { preTaskId = task.getId(); ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId); if (map != null) { return map; } } } logger.info("sub process instance is not found,parent task:{},parent instance:{}", parentTask.getId(), parentProcessInstance.getId()); return null; } /** * create sub work process command * * @param parentProcessInstance parentProcessInstance * @param task task */ public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) { if (!task.isSubProcess()) { return; } //check create sub work flow firstly ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId()); if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) { // recover failover tolerance would not create a new command when the sub command already have been created return; } instanceMap = setProcessInstanceMap(parentProcessInstance, task); ProcessInstance childInstance = null; if (instanceMap.getProcessInstanceId() != 0) { childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId()); } Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task); updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode()); initSubInstanceState(childInstance); createCommand(subProcessCommand); logger.info("sub process command created: {} ", subProcessCommand); } /** * complement data needs transform parent parameter to child. 
*/ private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) { // set sub work process command String processMapStr = JSONUtils.toJsonString(instanceMap); Map<String, String> cmdParam = JSONUtils.toMap(processMapStr); if (parentProcessInstance.isComplementData()) { Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam()); String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE); String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime); processMapStr = JSONUtils.toJsonString(cmdParam); } if (fatherParams.size() != 0) { cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams)); processMapStr = JSONUtils.toJsonString(cmdParam); } return processMapStr; } public Map<String, String> getGlobalParamMap(String globalParams) { List<Property> propList; Map<String, String> globalParamMap = new HashMap<>(); if (StringUtils.isNotEmpty(globalParams)) { propList = JSONUtils.toList(globalParams, Property.class); globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); } return globalParamMap; } /** * create sub work process command */ public Command createSubProcessCommand(ProcessInstance parentProcessInstance, ProcessInstance childInstance, ProcessInstanceMap instanceMap, TaskInstance task) { CommandType commandType = getSubCommandType(parentProcessInstance, childInstance); Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams()); int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID)); ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId); Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS); List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams()); Map<String, String> fatherParams = new HashMap<>(); if (CollectionUtils.isNotEmpty(allParam)) { for (Property info : allParam) { fatherParams.put(info.getProp(), globalMap.get(info.getProp())); } } String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams); return new Command( commandType, TaskDependType.TASK_POST, parentProcessInstance.getFailureStrategy(), parentProcessInstance.getExecutorId(), processDefinition.getCode(), processParam, parentProcessInstance.getWarningType(), parentProcessInstance.getWarningGroupId(), parentProcessInstance.getScheduleTime(), task.getWorkerGroup(), task.getEnvironmentCode(), parentProcessInstance.getProcessInstancePriority() ); } /** * initialize sub work flow state * child instance state would be initialized when 'recovery from pause/stop/failure' */ private void initSubInstanceState(ProcessInstance childInstance) { if (childInstance != null) { childInstance.setState(ExecutionStatus.RUNNING_EXECUTION); updateProcessInstance(childInstance); } } /** * get sub work flow command type * child instance exist: child command = fatherCommand * child instance not exists: child command = fatherCommand[0] */ private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) { CommandType commandType = parentProcessInstance.getCommandType(); if (childInstance == null) { String fatherHistoryCommand = 
parentProcessInstance.getHistoryCmd(); commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]); } return commandType; } /** * update sub process definition * * @param parentProcessInstance parentProcessInstance * @param childDefinitionCode childDefinitionCode */ private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) { ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(), parentProcessInstance.getProcessDefinitionVersion()); ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode); if (childDefinition != null && fatherDefinition != null) { childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId()); processDefineMapper.updateById(childDefinition); } } /** * submit task to the database * * @param taskInstance taskInstance * @param processInstance processInstance * @return task instance */ public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) { ExecutionStatus processInstanceState = processInstance.getState(); if (taskInstance.getState().typeIsFailure()) { if (taskInstance.isSubProcess()) { taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1); } else { if (processInstanceState != ExecutionStatus.READY_STOP && processInstanceState != ExecutionStatus.READY_PAUSE) { // set the failed task invalid taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); // create a new task instance if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) { taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1); } taskInstance.setSubmitTime(null); taskInstance.setStartTime(null); taskInstance.setEndTime(null); taskInstance.setFlag(Flag.YES); taskInstance.setHost(null); taskInstance.setId(0); } } } taskInstance.setExecutorId(processInstance.getExecutorId()); taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority()); taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState)); if (taskInstance.getSubmitTime() == null) { taskInstance.setSubmitTime(new Date()); } if (taskInstance.getFirstSubmitTime() == null) { taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime()); } boolean saveResult = saveTaskInstance(taskInstance); if (!saveResult) { return null; } return taskInstance; } /** * get the submit task instance state by the work process state * the task state cannot be modified when it is running/killed/submitted successfully, or when this * task instance already exists in the task queue. 
* return pause if the work process state is ready pause * return stop if the work process state is ready stop * if none of the above are satisfied, return submit success * * @param taskInstance taskInstance * @param processInstanceState processInstanceState * @return process instance state */ public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) { ExecutionStatus state = taskInstance.getState(); // running, delayed or killed // the task already exists in the task queue // return state if ( state == ExecutionStatus.RUNNING_EXECUTION || state == ExecutionStatus.DELAY_EXECUTION || state == ExecutionStatus.KILL ) { return state; } // return pause/stop if the process instance state is ready pause/stop // otherwise return submit success if (processInstanceState == ExecutionStatus.READY_PAUSE) { state = ExecutionStatus.PAUSE; } else if (processInstanceState == ExecutionStatus.READY_STOP || !checkProcessStrategy(taskInstance)) { state = ExecutionStatus.KILL; } else { state = ExecutionStatus.SUBMITTED_SUCCESS; } return state; } /** * check process instance strategy * * @param taskInstance taskInstance * @return check strategy result */ private boolean checkProcessStrategy(TaskInstance taskInstance) { ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId()); FailureStrategy failureStrategy = processInstance.getFailureStrategy(); if (failureStrategy == FailureStrategy.CONTINUE) { return true; } List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId()); for (TaskInstance task : taskInstances) { if (task.getState() == ExecutionStatus.FAILURE && task.getRetryTimes() >= task.getMaxRetryTimes()) { return false; } } return true; } /** * insert or update work process instance to the database * * @param processInstance processInstance */ public void saveProcessInstance(ProcessInstance processInstance) { if (processInstance == null) { logger.error("save error, process instance is null!"); return; } if (processInstance.getId() != 0) { processInstanceMapper.updateById(processInstance); } else { processInstanceMapper.insert(processInstance); } } /** * insert or update command * * @param command command * @return save command result */ public int saveCommand(Command command) { if (command.getId() != 0) { return commandMapper.updateById(command); } else { return commandMapper.insert(command); } } /** * insert or update task instance * * @param taskInstance taskInstance * @return save task instance result */ public boolean saveTaskInstance(TaskInstance taskInstance) { if (taskInstance.getId() != 0) { return updateTaskInstance(taskInstance); } else { return createTaskInstance(taskInstance); } } /** * insert task instance * * @param taskInstance taskInstance * @return create task instance result */ public boolean createTaskInstance(TaskInstance taskInstance) { int count = taskInstanceMapper.insert(taskInstance); return count > 0; } /** * update task instance * * @param taskInstance taskInstance * @return update task instance result */ public boolean updateTaskInstance(TaskInstance taskInstance) { int count = taskInstanceMapper.updateById(taskInstance); return count > 0; } /** * find task instance by id * * @param taskId task id * @return task instance */ public TaskInstance findTaskInstanceById(Integer taskId) { return taskInstanceMapper.selectById(taskId); } /** * package task instance, associate processInstance and processDefine * * @param taskInstId taskInstId * @return task instance */ public 
TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) { // get task instance TaskInstance taskInstance = findTaskInstanceById(taskInstId); if (taskInstance == null) { return null; } // get process instance ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId()); // get process define ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); taskInstance.setProcessInstance(processInstance); taskInstance.setProcessDefine(processDefine); TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); updateTaskDefinitionResources(taskDefinition); taskInstance.setTaskDefine(taskDefinition); return taskInstance; } /** * Update {@link ResourceInfo} information in {@link TaskDefinition} * * @param taskDefinition the given {@link TaskDefinition} */ private void updateTaskDefinitionResources(TaskDefinition taskDefinition) { Map<String, Object> taskParameters = JSONUtils.parseObject( taskDefinition.getTaskParams(), new TypeReference<Map<String, Object>>() { }); if (taskParameters != null) { // if it contains a mainJar field, query the resource from the database // Flink, Spark, MR if (taskParameters.containsKey("mainJar")) { Object mainJarObj = taskParameters.get("mainJar"); ResourceInfo mainJar = JSONUtils.parseObject( JSONUtils.toJsonString(mainJarObj), ResourceInfo.class); ResourceInfo resourceInfo = updateResourceInfo(mainJar); if (resourceInfo != null) { taskParameters.put("mainJar", resourceInfo); } } // update resourceList information if (taskParameters.containsKey("resourceList")) { String resourceListStr = JSONUtils.toJsonString(taskParameters.get("resourceList")); List<ResourceInfo> resourceInfos = JSONUtils.toList(resourceListStr, ResourceInfo.class); List<ResourceInfo> updatedResourceInfos = resourceInfos .stream() .map(this::updateResourceInfo) .filter(Objects::nonNull) .collect(Collectors.toList()); taskParameters.put("resourceList", updatedResourceInfos); } // set task parameters taskDefinition.setTaskParams(JSONUtils.toJsonString(taskParameters)); } } /** * update {@link ResourceInfo} by the given original ResourceInfo * * @param res origin resource info * @return {@link ResourceInfo} */ private ResourceInfo updateResourceInfo(ResourceInfo res) { ResourceInfo resourceInfo = null; // only if mainJar is not null and does not contain the "resourceName" field if (res != null) { int resourceId = res.getId(); if (resourceId <= 0) { logger.error("invalid resourceId, {}", resourceId); return null; } resourceInfo = new ResourceInfo(); // get the resource from the database; only one resource should be returned Resource resource = getResourceById(resourceId); resourceInfo.setId(resourceId); resourceInfo.setRes(resource.getFileName()); resourceInfo.setResourceName(resource.getFullName()); if (logger.isInfoEnabled()) { logger.info("updated resource info {}", JSONUtils.toJsonString(resourceInfo)); } } return resourceInfo; } /** * get id list by task state * * @param instanceId instanceId * @param state state * @return task instance id list */ public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) { return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal()); } /** * find valid task list by process instance id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> 
findValidTaskListByProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES); } /** * find previous task list by work process id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO); } /** * update work process instance map * * @param processInstanceMap processInstanceMap * @return update process instance result */ public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { return processInstanceMapMapper.updateById(processInstanceMap); } /** * create work process instance map * * @param processInstanceMap processInstanceMap * @return create process instance result */ public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { int count = 0; if (processInstanceMap != null) { return processInstanceMapMapper.insert(processInstanceMap); } return count; } /** * find work process map by parent process id and parent task id. * * @param parentWorkProcessId parentWorkProcessId * @param parentTaskId parentTaskId * @return process instance map */ public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) { return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId); } /** * delete work process map by parent process id * * @param parentWorkProcessId parentWorkProcessId * @return delete process map result */ public int deleteWorkProcessMapByParentId(int parentWorkProcessId) { return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId); } /** * find sub process instance * * @param parentProcessId parentProcessId * @param parentTaskId parentTaskId * @return process instance */ public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId()); return processInstance; } /** * find parent process instance * * @param subProcessId subProcessId * @return process instance */ public ProcessInstance findParentProcessInstance(Integer subProcessId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); return processInstance; } /** * change task state * * @param state state * @param startTime startTime * @param host host * @param executePath executePath * @param logPath logPath * @param taskInstId taskInstId */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host, String executePath, String logPath, int taskInstId) { taskInstance.setState(state); taskInstance.setStartTime(startTime); taskInstance.setHost(host); taskInstance.setExecutePath(executePath); taskInstance.setLogPath(logPath); saveTaskInstance(taskInstance); } /** * update process instance * * @param processInstance processInstance * @return update process instance 
result */ public int updateProcessInstance(ProcessInstance processInstance) { return processInstanceMapper.updateById(processInstance); } /** * change task state * * @param state state * @param endTime endTime * @param taskInstId taskInstId * @param varPool varPool */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date endTime, int processId, String appIds, int taskInstId, String varPool) { taskInstance.setPid(processId); taskInstance.setAppLink(appIds); taskInstance.setState(state); taskInstance.setEndTime(endTime); taskInstance.setVarPool(varPool); changeOutParam(taskInstance); saveTaskInstance(taskInstance); } /** * for display on the taskInstance page * * @param taskInstance */ public void changeOutParam(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getVarPool())) { return; } List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class); if (CollectionUtils.isEmpty(properties)) { return; } // if the result has more than one line, just get the first one Map<String, Object> taskParams = JSONUtils.toMap(taskInstance.getTaskParams(), String.class, Object.class); Object localParams = taskParams.get(LOCAL_PARAMS); if (localParams == null) { return; } List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> outProperty = new HashMap<>(); for (Property info : properties) { if (info.getDirect() == Direct.OUT) { outProperty.put(info.getProp(), info.getValue()); } } for (Property info : allParam) { if (info.getDirect() == Direct.OUT) { String paramName = info.getProp(); info.setValue(outProperty.get(paramName)); } } taskParams.put(LOCAL_PARAMS, allParam); taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams)); } /** * convert integer list to string list * * @param intList intList * @return string list */ public List<String> convertIntListToString(List<Integer> intList) { if (intList == null) { return new ArrayList<>(); } List<String> result = new ArrayList<>(intList.size()); for (Integer intVar : intList) { result.add(String.valueOf(intVar)); } return result; } /** * query schedule by id * * @param id id * @return schedule */ public Schedule querySchedule(int id) { return scheduleMapper.selectById(id); } /** * query Schedule by processDefinitionCode * * @param processDefinitionCode processDefinitionCode * @see Schedule */ public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) { return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode); } /** * query process instances that need failover * * @param host host * @return process instance list */ public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) { return processInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * process a process instance that needs failover * * @param processInstance processInstance */ @Transactional(rollbackFor = RuntimeException.class) public void processNeedFailoverProcessInstances(ProcessInstance processInstance) { // 1. update the processInstance, setting its host to null processInstance.setHost(Constants.NULL); processInstanceMapper.updateById(processInstance); ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); // 2. insert a recover command Command cmd = new Command(); cmd.setProcessDefinitionCode(processDefinition.getCode()); cmd.setCommandParam(String.format("{\"%s\":%d}", 
Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId())); cmd.setExecutorId(processInstance.getExecutorId()); cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS); createCommand(cmd); } /** * query all need failover task instances by host * * @param host host * @return task instance list */ public List<TaskInstance> queryNeedFailoverTaskInstances(String host) { return taskInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * find data source by id * * @param id id * @return datasource */ public DataSource findDataSourceById(int id) { return dataSourceMapper.selectById(id); } /** * update process instance state by id * * @param processInstanceId processInstanceId * @param executionStatus executionStatus * @return update process result */ public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) { ProcessInstance instance = processInstanceMapper.selectById(processInstanceId); instance.setState(executionStatus); return processInstanceMapper.updateById(instance); } /** * find process instance by the task id * * @param taskId taskId * @return process instance */ public ProcessInstance findProcessInstanceByTaskId(int taskId) { TaskInstance taskInstance = taskInstanceMapper.selectById(taskId); if (taskInstance != null) { return processInstanceMapper.selectById(taskInstance.getProcessInstanceId()); } return null; } /** * find udf function list by id list string * * @param ids ids * @return udf function list */ public List<UdfFunc> queryUdfFunListByIds(int[] ids) { return udfFuncMapper.queryUdfByIdStr(ids, null); } /** * find tenant code by resource name * * @param resName resource name * @param resourceType resource type * @return tenant code */ public String queryTenantCodeByResName(String resName, ResourceType resourceType) { // in order to query tenant code successful although the version is older String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName); List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { return StringUtils.EMPTY; } int userId = resourceList.get(0).getUserId(); User user = userMapper.selectById(userId); if (Objects.isNull(user)) { return StringUtils.EMPTY; } Tenant tenant = tenantMapper.selectById(user.getTenantId()); if (Objects.isNull(tenant)) { return StringUtils.EMPTY; } return tenant.getTenantCode(); } /** * find schedule list by process define codes. 
* * @param codes codes * @return schedule list */ public List<Schedule> selectAllByProcessDefineCode(long[] codes) { return scheduleMapper.selectAllByProcessDefineArray(codes); } /** * find last scheduler process instance in the date interval * * @param definitionCode definitionCode * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastSchedulerProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last manual process instance interval * * @param definitionCode process definition code * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastManualProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last running process instance * * @param definitionCode process definition code * @param startTime start time * @param endTime end time * @return process instance */ public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) { return processInstanceMapper.queryLastRunningProcess(definitionCode, startTime, endTime, stateArray); } /** * query user queue by process instance id * * @param processInstanceId processInstanceId * @return queue */ public String queryUserQueueByProcessInstanceId(int processInstanceId) { String queue = ""; ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId); if (processInstance == null) { return queue; } User executor = userMapper.selectById(processInstance.getExecutorId()); if (executor != null) { queue = executor.getQueue(); } return queue; } /** * query project name and user name by processInstanceId. 
* * @param processInstanceId processInstanceId * @return projectName and userName */ public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) { return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId); } /** * get task worker group * * @param taskInstance taskInstance * @return workerGroupId */ public String getTaskWorkerGroup(TaskInstance taskInstance) { String workerGroup = taskInstance.getWorkerGroup(); if (StringUtils.isNotBlank(workerGroup)) { return workerGroup; } int processInstanceId = taskInstance.getProcessInstanceId(); ProcessInstance processInstance = findProcessInstanceById(processInstanceId); if (processInstance != null) { return processInstance.getWorkerGroup(); } logger.info("task : {} will use default worker group", taskInstance.getId()); return Constants.DEFAULT_WORKER_GROUP; } /** * get have perm project list * * @param userId userId * @return project list */ public List<Project> getProjectListHavePerm(int userId) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId); List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId); if (createProjects == null) { createProjects = new ArrayList<>(); } if (authedProjects != null) { createProjects.addAll(authedProjects); } return createProjects; } /** * list unauthorized udf function * * @param userId user id * @param needChecks data source id array * @return unauthorized udf function list */ public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) { List<T> resultList = new ArrayList<>(); if (Objects.nonNull(needChecks) && needChecks.length > 0) { Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks)); switch (authorizationType) { case RESOURCE_FILE_ID: case UDF_FILE: List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks); addAuthorizedResources(ownUdfResources, userId); Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet()); originResSet.removeAll(authorizedResourceFiles); break; case RESOURCE_FILE_NAME: List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks); addAuthorizedResources(ownResources, userId); Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet()); originResSet.removeAll(authorizedResources); break; case DATASOURCE: Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet()); originResSet.removeAll(authorizedDatasources); break; case UDF: Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet()); originResSet.removeAll(authorizedUdfs); break; default: break; } resultList.addAll(originResSet); } return resultList; } /** * get user by user id * * @param userId user id * @return User */ public User getUserById(int userId) { return userMapper.selectById(userId); } /** * get resource by resource id * * @param resourceId resource id * @return Resource */ public Resource getResourceById(int resourceId) { return resourceMapper.selectById(resourceId); } /** * list resources by ids * * @param resIds resIds * @return resource list */ public List<Resource> listResourceByIds(Integer[] resIds) { return resourceMapper.listResourceByIds(resIds); } /** * format task app id in task instance */ public String formatTaskAppId(TaskInstance taskInstance) { 
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId()); if (processInstance == null) { return ""; } ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (definition == null) { return ""; } return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId()); } /** * switch process definition version to process definition log version */ public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) { if (null == processDefinition || null == processDefinitionLog) { return Constants.DEFINITION_FAILURE; } processDefinitionLog.setId(processDefinition.getId()); processDefinitionLog.setReleaseState(ReleaseState.OFFLINE); processDefinitionLog.setFlag(Flag.YES); int result = processDefineMapper.updateById(processDefinitionLog); if (result > 0) { result = switchProcessTaskRelationVersion(processDefinition); if (result <= 0) { return Constants.DEFINITION_FAILURE; } } return result; } public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode()); } List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); return processTaskRelationMapper.batchInsert(processTaskRelationLogList); } /** * get resource ids * * @param taskDefinition taskDefinition * @return resource ids */ public String getResourceIds(TaskDefinition taskDefinition) { Set<Integer> resourceIds = null; AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams()); if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) { resourceIds = params.getResourceFilesList(). 
stream() .filter(t -> t.getId() != 0) .map(ResourceInfo::getId) .collect(Collectors.toSet()); } if (CollectionUtils.isEmpty(resourceIds)) { return StringUtils.EMPTY; } return StringUtils.join(resourceIds, ","); } public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) { Date now = new Date(); List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>(); List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperateTime(now); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog)); if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) { TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper .queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion()); if (definitionCodeAndVersion != null) { if (!taskDefinitionLog.equals(definitionCodeAndVersion)) { taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId()); Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode()); taskDefinitionLog.setVersion(version + 1); taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime()); updateTaskDefinitionLogs.add(taskDefinitionLog); } continue; } } taskDefinitionLog.setUserId(operator.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); if (taskDefinitionLog.getCode() == 0) { try { taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); return Constants.DEFINITION_FAILURE; } } newTaskDefinitionLogs.add(taskDefinitionLog); } int insertResult = 0; int updateResult = 0; for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) { TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode()); if (task == null) { newTaskDefinitionLogs.add(taskDefinitionToUpdate); } else { insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate); taskDefinitionToUpdate.setId(task.getId()); updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate); } } if (!newTaskDefinitionLogs.isEmpty()) { updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs); insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs); } return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS; } /** * save processDefinition (including create or update processDefinition) */ public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) { ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1; processDefinitionLog.setVersion(insertVersion); processDefinitionLog.setReleaseState(isFromProcessDefine ? 
ReleaseState.OFFLINE : ReleaseState.ONLINE); processDefinitionLog.setOperator(operator.getId()); processDefinitionLog.setOperateTime(processDefinition.getUpdateTime()); int insertLog = processDefineLogMapper.insert(processDefinitionLog); int result; if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { processDefinitionLog.setId(processDefinition.getId()); result = processDefineMapper.updateById(processDefinitionLog); } return (insertLog & result) > 0 ? insertVersion : 0; } /** * save task relations */ public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion, List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null; if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) { taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog)); } Date now = new Date(); for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { processTaskRelationLog.setProjectCode(projectCode); processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode); processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion); if (taskDefinitionLogMap != null) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode()); if (taskDefinitionLog != null) { processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion()); } processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion()); } processTaskRelationLog.setCreateTime(now); processTaskRelationLog.setUpdateTime(now); processTaskRelationLog.setOperator(operator.getId()); processTaskRelationLog.setOperateTime(now); } List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet()); Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet()); if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) { return Constants.EXIT_CODE_SUCCESS; } processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode); } int result = processTaskRelationMapper.batchInsert(taskRelationList); int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList); return (result & resultLog) > 0 ? 
Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE; } public boolean isTaskOnline(long taskCode) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode); if (!processTaskRelationList.isEmpty()) { Set<Long> processDefinitionCodes = processTaskRelationList .stream() .map(ProcessTaskRelation::getProcessDefinitionCode) .collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes); // check process definition is already online for (ProcessDefinition processDefinition : processDefinitionList) { if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { return true; } } } return false; } /** * Generate the DAG Graph based on the process definition id * * @param processDefinition process definition * @return dag graph */ public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList()); ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations)); // Generate concrete Dag to be executed return DagHelper.buildDagGraph(processDag); } /** * generate DagData */ public DagData genDagData(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations); List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream() .map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class)) .collect(Collectors.toList()); return new DagData(processDefinition, processTaskRelations, taskDefinitions); } public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) { Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion())); } if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); } /** * find task definition by code and version */ public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) { return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion); } /** * find process task relation list by projectCode and processDefinitionCode */ public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) { return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); } /** * add authorized resources * * @param ownResources own resources * @param userId userId */ private void addAuthorizedResources(List<Resource> ownResources, int userId) { List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7); List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? 
resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>(); ownResources.addAll(relationResources); } /** * Use temporarily before refactoring taskNode */ public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, List<Long>> taskCodeMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> { if (v == null) { v = new ArrayList<>(); } if (processTaskRelation.getPreTaskCode() != 0L) { v.add(processTaskRelation.getPreTaskCode()); } return v; }); } if (CollectionUtils.isEmpty(taskDefinitionLogs)) { taskDefinitionLogs = genTaskDefineList(taskRelationList); } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); List<TaskNode> taskNodeList = new ArrayList<>(); for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey()); if (taskDefinitionLog != null) { TaskNode taskNode = new TaskNode(); taskNode.setCode(taskDefinitionLog.getCode()); taskNode.setVersion(taskDefinitionLog.getVersion()); taskNode.setName(taskDefinitionLog.getName()); taskNode.setDesc(taskDefinitionLog.getDescription()); taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase()); taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN); taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes()); taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval()); Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams()); taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT))); taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT))); taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE))); taskParamsMap.remove(Constants.CONDITION_RESULT); taskParamsMap.remove(Constants.DEPENDENCE); taskNode.setParams(JSONUtils.toJsonString(taskParamsMap)); taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority()); taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup()); taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode()); taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN, taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout()))); taskNode.setDelayTime(taskDefinitionLog.getDelayTime()); taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getName).collect(Collectors.toList()))); taskNodeList.add(taskNode); } } return taskNodeList; } public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) { HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>(); //find sub tasks ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId); if (processInstanceMap == null) { return processTaskMap; } ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId()); if (fatherProcess != null) { 
processTaskMap.put(fatherProcess, fatherTask); } return processTaskMap; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,323
[Bug] [MasterServer] make CPU 100% even without task
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened branch: dev. The master-server drives CPU usage to 100% even when no task is running, because EventExecuteService runs its event loop without sleeping between iterations (see the sketch below). ### What you expected to happen Low CPU usage when idle. ### How to reproduce Run the master-server and observe its CPU usage. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
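A minimal, self-contained sketch of the busy-wait described above and the usual remedy: sleep between polls when there is no work. The class name, poll interval, and shutdown logic here are illustrative assumptions, not the actual change merged for this issue.

```java
// Illustrative only: a polling loop that yields the CPU between iterations.
public class EventLoopSketch extends Thread {

    private static final long POLL_INTERVAL_MS = 100; // assumed interval
    private volatile boolean running = true;

    @Override
    public void run() {
        while (running) {
            try {
                handleEvents();
                // Without this sleep, an idle loop spins at 100% CPU.
                Thread.sleep(POLL_INTERVAL_MS);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                running = false;
            } catch (Exception e) {
                // a real service would log the error and keep looping
                e.printStackTrace();
            }
        }
    }

    private void handleEvents() {
        // placeholder for dispatching WorkflowExecuteThread events
    }

    public void shutdown() {
        running = false;
        interrupt(); // wake the thread if it is sleeping
    }

    public static void main(String[] args) throws InterruptedException {
        EventLoopSketch loop = new EventLoopSketch();
        loop.start();
        Thread.sleep(500);
        loop.shutdown();
    }
}
```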
https://github.com/apache/dolphinscheduler/issues/6323
https://github.com/apache/dolphinscheduler/pull/6324
c3324cc101b39eb789cccd9dee7e3cc5aae8722b
89cc045961477168ac8ff74c5d6604b606214775
"2021-09-24T02:13:49Z"
java
"2021-09-24T08:19:02Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/EventExecuteService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.runner; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.StateEvent; import org.apache.dolphinscheduler.common.enums.StateEventType; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand; import org.apache.dolphinscheduler.remote.processor.StateEventCallbackService; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.commons.lang.StringUtils; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ListeningExecutorService; import com.google.common.util.concurrent.MoreExecutors; @Service public class EventExecuteService extends Thread { private static final Logger logger = LoggerFactory.getLogger(EventExecuteService.class); /** * dolphinscheduler database interface */ @Autowired private ProcessService processService; @Autowired private MasterConfig masterConfig; private ExecutorService eventExecService; /** * */ private StateEventCallbackService stateEventCallbackService; private ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps; private ConcurrentHashMap<String, WorkflowExecuteThread> eventHandlerMap = new ConcurrentHashMap(); ListeningExecutorService listeningExecutorService; public void init(ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps) { eventExecService = ThreadUtils.newDaemonFixedThreadExecutor("MasterEventExecution", masterConfig.getMasterExecThreads()); this.processInstanceExecMaps = processInstanceExecMaps; listeningExecutorService = MoreExecutors.listeningDecorator(eventExecService); this.stateEventCallbackService = SpringApplicationContext.getBean(StateEventCallbackService.class); } @Override public synchronized void start() { super.setName("EventServiceStarted"); super.start(); } public void close() { eventExecService.shutdown(); 
logger.info("event service stopped..."); } @Override public void run() { logger.info("Event service started"); while (Stopper.isRunning()) { try { eventHandler(); } catch (Exception e) { logger.error("Event service thread error", e); } } } private void eventHandler() { for (WorkflowExecuteThread workflowExecuteThread : this.processInstanceExecMaps.values()) { if (workflowExecuteThread.eventSize() == 0 || StringUtils.isEmpty(workflowExecuteThread.getKey()) || eventHandlerMap.containsKey(workflowExecuteThread.getKey())) { continue; } int processInstanceId = workflowExecuteThread.getProcessInstance().getId(); logger.info("handle process instance : {} events, count:{}", processInstanceId, workflowExecuteThread.eventSize()); logger.info("already exists handler process size:{}", this.eventHandlerMap.size()); eventHandlerMap.put(workflowExecuteThread.getKey(), workflowExecuteThread); ListenableFuture future = this.listeningExecutorService.submit(workflowExecuteThread); FutureCallback futureCallback = new FutureCallback() { @Override public void onSuccess(Object o) { if (workflowExecuteThread.workFlowFinish()) { processInstanceExecMaps.remove(processInstanceId); notifyProcessChanged(); logger.info("process instance {} finished.", processInstanceId); } if (workflowExecuteThread.getProcessInstance().getId() != processInstanceId) { processInstanceExecMaps.remove(processInstanceId); processInstanceExecMaps.put(workflowExecuteThread.getProcessInstance().getId(), workflowExecuteThread); } eventHandlerMap.remove(workflowExecuteThread.getKey()); } private void notifyProcessChanged() { Map<ProcessInstance, TaskInstance> fatherMaps = processService.notifyProcessList(processInstanceId, 0); for (ProcessInstance processInstance : fatherMaps.keySet()) { String address = NetUtils.getAddr(masterConfig.getListenPort()); if (processInstance.getHost().equalsIgnoreCase(address)) { notifyMyself(processInstance, fatherMaps.get(processInstance)); } else { notifyProcess(processInstance, fatherMaps.get(processInstance)); } } } private void notifyMyself(ProcessInstance processInstance, TaskInstance taskInstance) { logger.info("notify process {} task {} state change", processInstance.getId(), taskInstance.getId()); if (!processInstanceExecMaps.containsKey(processInstance.getId())) { return; } WorkflowExecuteThread workflowExecuteThreadNotify = processInstanceExecMaps.get(processInstance.getId()); StateEvent stateEvent = new StateEvent(); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); stateEvent.setProcessInstanceId(processInstance.getId()); stateEvent.setExecutionStatus(ExecutionStatus.RUNNING_EXECUTION); workflowExecuteThreadNotify.addStateEvent(stateEvent); } private void notifyProcess(ProcessInstance processInstance, TaskInstance taskInstance) { String host = processInstance.getHost(); if (StringUtils.isEmpty(host)) { logger.info("process {} host is empty, cannot notify task {} now.", processInstance.getId(), taskInstance.getId()); return; } String address = host.split(":")[0]; int port = Integer.parseInt(host.split(":")[1]); logger.info("notify process {} task {} state change, host:{}", processInstance.getId(), taskInstance.getId(), host); StateEventChangeCommand stateEventChangeCommand = new StateEventChangeCommand( processInstanceId, 0, workflowExecuteThread.getProcessInstance().getState(), processInstance.getId(), taskInstance.getId() ); stateEventCallbackService.sendResult(address, port, stateEventChangeCommand.convert2Command()); } @Override public void 
onFailure(Throwable throwable) { } }; Futures.addCallback(future, futureCallback, this.listeningExecutorService); } } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,331
[Bug] [TaskPlugin] PSQLException: statement has been closed when run sql task
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened branch: dev; mode: dev run, with master-server, worker-server, and api-server started from IDEA. dolphinscheduler-server worker.properties config: `task.plugin.binding=./dolphinscheduler-task-plugin/dolphinscheduler-task-shell/pom.xml,./dolphinscheduler-task-plugin/dolphinscheduler-task-python/pom.xml,./dolphinscheduler-task-plugin/dolphinscheduler-task-sql/pom.xml` The following jar dependencies were added to the dolphinscheduler-task-sql pom.xml: ``` <dependency> <groupId>org.postgresql</groupId> <artifactId>postgresql</artifactId> </dependency> <dependency> <groupId>commons-codec</groupId> <artifactId>commons-codec</artifactId> </dependency> <dependency> <groupId>com.alibaba</groupId> <artifactId>druid</artifactId> </dependency> ``` exception: ``` [INFO] 2021-09-24 15:54:37.813 /home/csf/javapath/dolphinscheduler/logs/754963903078400_1/28/52.log:[386] - prepare statement replace sql : select count(1) from t_ds_task_instance [ERROR] 2021-09-24 15:54:37.815 /home/csf/javapath/dolphinscheduler/logs/754963903078400_1/28/52.log:[202] - execute sql error: This statement has been closed. [ERROR] 2021-09-24 15:54:37.815 /home/csf/javapath/dolphinscheduler/logs/754963903078400_1/28/52.log:[153] - sql task error: org.postgresql.util.PSQLException: This statement has been closed. [ERROR] 2021-09-24 15:54:37.817 org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread:[212] - task scheduler failure org.postgresql.util.PSQLException: This statement has been closed. at org.postgresql.jdbc.PgStatement.checkClosed(PgStatement.java:694) at org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:137) at org.postgresql.jdbc.PgPreparedStatement.executeQuery(PgPreparedStatement.java:106) at org.apache.dolphinscheduler.plugin.task.sql.SqlTask.executeFuncAndSql(SqlTask.java:190) at org.apache.dolphinscheduler.plugin.task.sql.SqlTask.handle(SqlTask.java:147) at org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread.run(TaskExecuteThread.java:198) ``` ### What you expected to happen The SQL task runs successfully. ### How to reproduce Create a process that contains a SQL task using a PostgreSQL datasource, and run it (a sketch of the failure mode follows below). ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
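The root cause is visible in this record's file content below: `prepareStatementAndBind` in SqlTask.java creates the `PreparedStatement` inside a try-with-resources block and returns it, so the statement is closed the moment the method exits, and the caller's `executeQuery()` fails with "This statement has been closed." The following is a minimal sketch of the failure mode and one possible correction; the class and method names are illustrative, and obtaining a JDBC `Connection` is assumed.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

public class StatementLifetimeSketch {

    // BROKEN: try-with-resources closes stmt when this method returns, so the
    // caller's executeQuery() throws "This statement has been closed."
    static PreparedStatement prepareBroken(Connection connection, String sql) throws SQLException {
        try (PreparedStatement stmt = connection.prepareStatement(sql)) {
            return stmt; // stmt is closed as the try block exits
        }
    }

    // POSSIBLE FIX: create the statement without try-with-resources and leave
    // closing to the caller (SqlTask already closes it in a finally block).
    static PreparedStatement prepareFixed(Connection connection, String sql) throws SQLException {
        PreparedStatement stmt = connection.prepareStatement(sql);
        // parameter binding would happen here before returning
        return stmt;
    }
}
```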
https://github.com/apache/dolphinscheduler/issues/6331
https://github.com/apache/dolphinscheduler/pull/6334
89cc045961477168ac8ff74c5d6604b606214775
61c64619949b2098f7efcd2cfcc701ef98564083
"2021-09-24T08:21:29Z"
java
"2021-09-24T10:39:04Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-sql/src/main/java/org/apache/dolphinscheduler/plugin/task/sql/SqlTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.sql; import org.apache.dolphinscheduler.plugin.task.api.AbstractTaskExecutor; import org.apache.dolphinscheduler.plugin.task.api.TaskException; import org.apache.dolphinscheduler.plugin.task.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.plugin.task.datasource.DatasourceUtil; import org.apache.dolphinscheduler.plugin.task.util.MapUtils; import org.apache.dolphinscheduler.spi.enums.DbType; import org.apache.dolphinscheduler.spi.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.spi.task.AbstractParameters; import org.apache.dolphinscheduler.spi.task.Direct; import org.apache.dolphinscheduler.spi.task.Property; import org.apache.dolphinscheduler.spi.task.TaskAlertInfo; import org.apache.dolphinscheduler.spi.task.TaskConstants; import org.apache.dolphinscheduler.spi.task.paramparser.ParamUtils; import org.apache.dolphinscheduler.spi.task.paramparser.ParameterUtils; import org.apache.dolphinscheduler.spi.task.request.SQLTaskExecutionContext; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; import org.apache.dolphinscheduler.spi.task.request.UdfFuncRequest; import org.apache.dolphinscheduler.spi.utils.CollectionUtils; import org.apache.dolphinscheduler.spi.utils.JSONUtils; import org.apache.dolphinscheduler.spi.utils.StringUtils; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.text.MessageFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import org.slf4j.Logger; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; public class SqlTask extends AbstractTaskExecutor { /** * taskExecutionContext */ private TaskRequest taskExecutionContext; /** * sql parameters */ private SqlParameters sqlParameters; /** * base datasource */ private BaseConnectionParam baseConnectionParam; /** * create function format */ private static final String CREATE_FUNCTION_FORMAT = "create temporary function {0} as ''{1}''"; /** * Abstract Yarn Task * * @param taskRequest taskRequest */ public SqlTask(TaskRequest taskRequest) { super(taskRequest); this.taskExecutionContext = taskRequest; this.sqlParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), SqlParameters.class); assert sqlParameters != null; if (!sqlParameters.checkParameters()) { throw new RuntimeException("sql task params is not valid"); } } @Override public AbstractParameters getParameters() { return 
sqlParameters; } @Override public void handle() throws Exception { // set the name of the current thread String threadLoggerInfoName = String.format(TaskConstants.TASK_LOG_INFO_FORMAT, taskExecutionContext.getTaskAppId()); Thread.currentThread().setName(threadLoggerInfoName); logger.info("Full sql parameters: {}", sqlParameters); logger.info("sql type : {}, datasource : {}, sql : {} , localParams : {},udfs : {},showType : {},connParams : {},varPool : {} ,query max result limit {}", sqlParameters.getType(), sqlParameters.getDatasource(), sqlParameters.getSql(), sqlParameters.getLocalParams(), sqlParameters.getUdfs(), sqlParameters.getShowType(), sqlParameters.getConnParams(), sqlParameters.getVarPool(), sqlParameters.getLimit()); try { SQLTaskExecutionContext sqlTaskExecutionContext = taskExecutionContext.getSqlTaskExecutionContext(); // get datasource baseConnectionParam = (BaseConnectionParam) DatasourceUtil.buildConnectionParams( DbType.valueOf(sqlParameters.getType()), sqlTaskExecutionContext.getConnectionParams()); // ready to execute SQL and parameter entity Map SqlBinds mainSqlBinds = getSqlAndSqlParamsMap(sqlParameters.getSql()); List<SqlBinds> preStatementSqlBinds = Optional.ofNullable(sqlParameters.getPreStatements()) .orElse(new ArrayList<>()) .stream() .map(this::getSqlAndSqlParamsMap) .collect(Collectors.toList()); List<SqlBinds> postStatementSqlBinds = Optional.ofNullable(sqlParameters.getPostStatements()) .orElse(new ArrayList<>()) .stream() .map(this::getSqlAndSqlParamsMap) .collect(Collectors.toList()); List<String> createFuncs = createFuncs(sqlTaskExecutionContext.getUdfFuncTenantCodeMap(), logger); // execute sql task executeFuncAndSql(mainSqlBinds, preStatementSqlBinds, postStatementSqlBinds, createFuncs); setExitStatusCode(TaskConstants.EXIT_CODE_SUCCESS); } catch (Exception e) { setExitStatusCode(TaskConstants.EXIT_CODE_FAILURE); logger.error("sql task error: {}", e.toString()); throw e; } } /** * execute function and sql * * @param mainSqlBinds main sql binds * @param preStatementsBinds pre statements binds * @param postStatementsBinds post statements binds * @param createFuncs create functions */ public void executeFuncAndSql(SqlBinds mainSqlBinds, List<SqlBinds> preStatementsBinds, List<SqlBinds> postStatementsBinds, List<String> createFuncs) throws Exception { Connection connection = null; PreparedStatement stmt = null; ResultSet resultSet = null; try { // create connection connection = DatasourceUtil.getConnection(DbType.valueOf(sqlParameters.getType()), baseConnectionParam); // create temp function if (CollectionUtils.isNotEmpty(createFuncs)) { createTempFunction(connection, createFuncs); } // pre sql preSql(connection, preStatementsBinds); stmt = prepareStatementAndBind(connection, mainSqlBinds); String result = null; // decide whether to executeQuery or executeUpdate based on sqlType if (sqlParameters.getSqlType() == SqlType.QUERY.ordinal()) { // query statements need to be convert to JsonArray and inserted into Alert to send resultSet = stmt.executeQuery(); result = resultProcess(resultSet); } else if (sqlParameters.getSqlType() == SqlType.NON_QUERY.ordinal()) { // non query statement String updateResult = String.valueOf(stmt.executeUpdate()); result = setNonQuerySqlReturn(updateResult, sqlParameters.getLocalParams()); } //deal out params sqlParameters.dealOutParam(result); postSql(connection, postStatementsBinds); } catch (Exception e) { logger.error("execute sql error: {}", e.getMessage()); throw e; } finally { close(resultSet, stmt, connection); } } private 
String setNonQuerySqlReturn(String updateResult, List<Property> properties) { String result = null; for (Property info : properties) { if (Direct.OUT == info.getDirect()) { List<Map<String, String>> updateRL = new ArrayList<>(); Map<String, String> updateRM = new HashMap<>(); updateRM.put(info.getProp(), updateResult); updateRL.add(updateRM); result = JSONUtils.toJsonString(updateRL); break; } } return result; } /** * result process * * @param resultSet resultSet * @throws Exception Exception */ private String resultProcess(ResultSet resultSet) throws Exception { ArrayNode resultJSONArray = JSONUtils.createArrayNode(); if (resultSet != null) { ResultSetMetaData md = resultSet.getMetaData(); int num = md.getColumnCount(); int rowCount = 0; while (rowCount < sqlParameters.getLimit() && resultSet.next()) { ObjectNode mapOfColValues = JSONUtils.createObjectNode(); for (int i = 1; i <= num; i++) { mapOfColValues.set(md.getColumnLabel(i), JSONUtils.toJsonNode(resultSet.getObject(i))); } resultJSONArray.add(mapOfColValues); rowCount++; } int displayRows = sqlParameters.getDisplayRows() > 0 ? sqlParameters.getDisplayRows() : TaskConstants.DEFAULT_DISPLAY_ROWS; displayRows = Math.min(displayRows, resultJSONArray.size()); logger.info("display sql result {} rows as follows:", displayRows); for (int i = 0; i < displayRows; i++) { String row = JSONUtils.toJsonString(resultJSONArray.get(i)); logger.info("row {} : {}", i + 1, row); } } String result = JSONUtils.toJsonString(resultJSONArray); if (sqlParameters.getSendEmail() == null || sqlParameters.getSendEmail()) { sendAttachment(sqlParameters.getGroupId(), StringUtils.isNotEmpty(sqlParameters.getTitle()) ? sqlParameters.getTitle() : taskExecutionContext.getTaskName() + " query result sets", result); } logger.debug("execute sql result : {}", result); return result; } /** * send alert as an attachment * * @param groupId alert group id * @param title title * @param content content */ private void sendAttachment(int groupId, String title, String content) { setNeedAlert(Boolean.TRUE); TaskAlertInfo taskAlertInfo = new TaskAlertInfo(); taskAlertInfo.setAlertGroupId(groupId); taskAlertInfo.setContent(content); taskAlertInfo.setTitle(title); // hand the populated alert info to the task; otherwise the object is a dead store and no alert content is ever delivered setTaskAlertInfo(taskAlertInfo); } /** * pre sql * * @param connection connection * @param preStatementsBinds preStatementsBinds */ private void preSql(Connection connection, List<SqlBinds> preStatementsBinds) throws Exception { for (SqlBinds sqlBind : preStatementsBinds) { try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) { int result = pstmt.executeUpdate(); logger.info("pre statement execute result: {}, for sql: {}", result, sqlBind.getSql()); } } } /** * post sql * * @param connection connection * @param postStatementsBinds postStatementsBinds */ private void postSql(Connection connection, List<SqlBinds> postStatementsBinds) throws Exception { for (SqlBinds sqlBind : postStatementsBinds) { try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) { int result = pstmt.executeUpdate(); logger.info("post statement execute result: {}, for sql: {}", result, sqlBind.getSql()); } } } /** * create temp function * * @param connection connection * @param createFuncs createFuncs */ private void createTempFunction(Connection connection, List<String> createFuncs) throws Exception { try (Statement funcStmt = connection.createStatement()) { for (String createFunc : createFuncs) { logger.info("hive create function sql: {}", createFunc); funcStmt.execute(createFunc); } } } /** * close jdbc resource * * @param resultSet resultSet * @param pstmt pstmt *
@param connection connection */ private void close(ResultSet resultSet, PreparedStatement pstmt, Connection connection) { if (resultSet != null) { try { resultSet.close(); } catch (SQLException e) { logger.error("close result set error : {}", e.getMessage(), e); } } if (pstmt != null) { try { pstmt.close(); } catch (SQLException e) { logger.error("close prepared statement error : {}", e.getMessage(), e); } } if (connection != null) { try { connection.close(); } catch (SQLException e) { logger.error("close connection error : {}", e.getMessage(), e); } } } /** * preparedStatement bind * * @param connection connection * @param sqlBinds sqlBinds * @return PreparedStatement * @throws Exception Exception */ private PreparedStatement prepareStatementAndBind(Connection connection, SqlBinds sqlBinds) { // is the timeout set boolean timeoutFlag = taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.FAILED || taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.WARNFAILED; // do not open the statement with try-with-resources: it is returned to the caller and must stay open until it is executed and later released in close() try { PreparedStatement stmt = connection.prepareStatement(sqlBinds.getSql()); if (timeoutFlag) { stmt.setQueryTimeout(taskExecutionContext.getTaskTimeout()); } Map<Integer, Property> params = sqlBinds.getParamsMap(); if (params != null) { for (Map.Entry<Integer, Property> entry : params.entrySet()) { Property prop = entry.getValue(); ParameterUtils.setInParameter(entry.getKey(), stmt, prop.getType(), prop.getValue()); } } logger.info("prepare statement replace sql: {}", stmt); return stmt; } catch (Exception exception) { throw new TaskException("SQL task prepareStatementAndBind error", exception); } } /** * regular expressions match the contents between two specified strings * * @param content content * @param rgex rgex * @param sqlParamsMap sql params map * @param paramsPropsMap params props map */ public void setSqlParamsMap(String content, String rgex, Map<Integer, Property> sqlParamsMap, Map<String, Property> paramsPropsMap) { Pattern pattern = Pattern.compile(rgex); Matcher m = pattern.matcher(content); int index = 1; while (m.find()) { String paramName = m.group(1); Property prop = paramsPropsMap.get(paramName); if (prop == null) { logger.error("setSqlParamsMap: No Property with paramName: {} is found in paramsPropsMap of task instance" + " with id: {}.
So couldn't put Property in sqlParamsMap.", paramName, taskExecutionContext.getTaskInstanceId()); } else { sqlParamsMap.put(index, prop); index++; logger.info("setSqlParamsMap: Property with paramName: {} put in sqlParamsMap of content {} successfully.", paramName, content); } } } /** * print replace sql * * @param content content * @param formatSql format sql * @param rgex rgex * @param sqlParamsMap sql params map */ private void printReplacedSql(String content, String formatSql, String rgex, Map<Integer, Property> sqlParamsMap) { //parameter print style logger.info("after replace sql, preparing: {}", formatSql); StringBuilder logPrint = new StringBuilder("replaced sql, parameters: "); if (sqlParamsMap == null) { logger.info("printReplacedSql: sqlParamsMap is null."); } else { for (int i = 1; i <= sqlParamsMap.size(); i++) { logPrint.append(sqlParamsMap.get(i).getValue()).append("(").append(sqlParamsMap.get(i).getType()).append(")"); } } logger.info("Sql Params are {}", logPrint); } /** * ready to execute SQL and parameter entity Map * * @return SqlBinds */ private SqlBinds getSqlAndSqlParamsMap(String sql) { Map<Integer, Property> sqlParamsMap = new HashMap<>(); StringBuilder sqlBuilder = new StringBuilder(); // combining local and global parameters Map<String, Property> paramsMap = ParamUtils.convert(taskExecutionContext, getParameters()); // spell SQL according to the final user-defined variable if (paramsMap == null) { sqlBuilder.append(sql); return new SqlBinds(sqlBuilder.toString(), sqlParamsMap); } if (StringUtils.isNotEmpty(sqlParameters.getTitle())) { String title = ParameterUtils.convertParameterPlaceholders(sqlParameters.getTitle(), ParamUtils.convert(paramsMap)); logger.info("SQL title : {}", title); sqlParameters.setTitle(title); } // replace variable TIME with $[YYYYmmddd...]
in sql when history run job and batch complement job sql = ParameterUtils.replaceScheduleTime(sql, taskExecutionContext.getScheduleTime()); // special characters need to be escaped, ${} needs to be escaped String rgex = "['\"]*\\$\\{(.*?)\\}['\"]*"; setSqlParamsMap(sql, rgex, sqlParamsMap, paramsMap); // replace the original value in sql !{...}, does not participate in precompilation String rgexo = "['\"]*\\!\\{(.*?)\\}['\"]*"; sql = replaceOriginalValue(sql, rgexo, paramsMap); // replace the ${} of the SQL statement with the Placeholder String formatSql = sql.replaceAll(rgex, "?"); sqlBuilder.append(formatSql); // print replace sql printReplacedSql(sql, formatSql, rgex, sqlParamsMap); return new SqlBinds(sqlBuilder.toString(), sqlParamsMap); } private String replaceOriginalValue(String content, String rgex, Map<String, Property> sqlParamsMap) { Pattern pattern = Pattern.compile(rgex); while (true) { Matcher m = pattern.matcher(content); if (!m.find()) { break; } String paramName = m.group(1); String paramValue = sqlParamsMap.get(paramName).getValue(); content = m.replaceFirst(paramValue); } return content; } /** * create function list * * @param udfFuncTenantCodeMap key is udf function, value is tenant code * @param logger logger * @return create function list */ public static List<String> createFuncs(Map<UdfFuncRequest, String> udfFuncTenantCodeMap, Logger logger) { if (MapUtils.isEmpty(udfFuncTenantCodeMap)) { logger.info("can't find udf function resource"); return null; } List<String> funcList = new ArrayList<>(); // build temp function sql buildTempFuncSql(funcList, new ArrayList<>(udfFuncTenantCodeMap.keySet())); return funcList; } /** * build temp function sql * * @param sqls sql list * @param udfFuncRequests udf function list */ private static void buildTempFuncSql(List<String> sqls, List<UdfFuncRequest> udfFuncRequests) { if (CollectionUtils.isNotEmpty(udfFuncRequests)) { for (UdfFuncRequest udfFuncRequest : udfFuncRequests) { sqls.add(MessageFormat .format(CREATE_FUNCTION_FORMAT, udfFuncRequest.getFuncName(), udfFuncRequest.getClassName())); } } } }
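The heart of `getSqlAndSqlParamsMap`/`setSqlParamsMap` above is a two-pass rewrite: first collect every `${name}` occurrence into a position-indexed parameter map, then replace each occurrence with a JDBC `?` placeholder so values are bound rather than spliced into the SQL text. A minimal, self-contained sketch of that mechanism using only the JDK regex API (class and variable names here are illustrative, not the project's; `Property` is simplified to a plain string value):

```java
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class SqlBindSketch {

    // same pattern family as the task code above: optional quotes around ${name}
    private static final Pattern PARAM = Pattern.compile("['\"]*\\$\\{(.*?)\\}['\"]*");

    public static void main(String[] args) {
        String sql = "select * from t where dt = ${bizdate} and id = ${id}";
        Map<String, String> paramsProps = new HashMap<>();
        paramsProps.put("bizdate", "2021-01-06");
        paramsProps.put("id", "42");

        // pass 1: collect parameter values keyed by their position in the SQL
        Map<Integer, String> positional = new LinkedHashMap<>();
        Matcher m = PARAM.matcher(sql);
        int index = 1;
        while (m.find()) {
            positional.put(index++, paramsProps.get(m.group(1)));
        }

        // pass 2: rewrite every ${...} occurrence to a JDBC placeholder
        String prepared = sql.replaceAll(PARAM.pattern(), "?");

        System.out.println(prepared);   // select * from t where dt = ? and id = ?
        System.out.println(positional); // {1=2021-01-06, 2=42}
    }
}
```

Running it prints the rewritten statement and the positional values in SQL order, which is exactly the pair that `SqlBinds` carries into `prepareStatementAndBind`.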
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,170
[Bug] [UI] Delete project error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Deleting a project fails with an error. ### What you expected to happen The project should be deleted normally. ### How to reproduce ![Snipaste_2021-09-11_13-08-08](https://user-images.githubusercontent.com/56899730/132937088-0fd626c4-c6f6-42bb-8661-d334b3dfe70f.PNG) ### Anything else dev branch ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6170
https://github.com/apache/dolphinscheduler/pull/6174
5dcfad98492c3e0f2e9f53381428de3d15d60c67
b71f6aea7af899da41bd958caa30d68d89a9aec4
"2021-09-11T05:17:36Z"
java
"2021-09-25T03:58:48Z"
dolphinscheduler-ui/src/js/conf/home/pages/projects/pages/list/_source/list.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="list-model"> <div class="table-box"> <el-table :data="list" size="mini" style="width: 100%"> <el-table-column type="index" :label="$t('#')" width="50"></el-table-column> <el-table-column :label="$t('Project Name')"> <template slot-scope="scope"> <el-popover trigger="hover" placement="top"> <p>{{ scope.row.name }}</p> <div slot="reference" class="name-wrapper"> <a href="javascript:" class="links" @click="_switchProjects(scope.row)">{{ scope.row.name }}</a> </div> </el-popover> </template> </el-table-column> <el-table-column prop="userName" :label="$t('Owned Users')"></el-table-column> <el-table-column prop="defCount" :label="$t('Process Define Count')"></el-table-column> <el-table-column prop="instRunningCount" :label="$t('Process Instance Running Count')"></el-table-column> <el-table-column :label="$t('Description')" width="200"> <template slot-scope="scope"> <span>{{scope.row.description | filterNull}}</span> </template> </el-table-column> <el-table-column :label="$t('Create Time')" min-width="120"> <template slot-scope="scope"> <span>{{scope.row.createTime | formatDate}}</span> </template> </el-table-column> <el-table-column :label="$t('Update Time')" min-width="120"> <template slot-scope="scope"> <span>{{scope.row.updateTime | formatDate}}</span> </template> </el-table-column> <el-table-column :label="$t('Operation')" width="150"> <template slot-scope="scope"> <el-tooltip :content="$t('Edit')" placement="top" :enterable="false"> <span><el-button type="primary" size="mini" icon="el-icon-edit-outline" @click="_edit(scope.row)" circle></el-button></span> </el-tooltip> <el-tooltip :content="$t('Delete')" placement="top" :enterable="false"> <el-popconfirm :confirmButtonText="$t('Confirm')" :cancelButtonText="$t('Cancel')" icon="el-icon-info" iconColor="red" :title="$t('Delete?')" @onConfirm="_delete(scope.row,scope.row.id)" > <el-button type="danger" size="mini" icon="el-icon-delete" circle slot="reference"></el-button> </el-popconfirm> </el-tooltip> </template> </el-table-column> </el-table> </div> </div> </template> <script> import { mapActions, mapMutations } from 'vuex' import localStore from '@/module/util/localStorage' import { findComponentDownward } from '@/module/util/' export default { name: 'projects-list', data () { return { list: [] } }, props: { projectsList: Array, pageNo: Number, pageSize: Number }, methods: { ...mapActions('projects', ['deleteProjects']), ...mapMutations('dag', ['setProjectId', 'setProjectCode', 'setProjectName']), _switchProjects (item) { this.setProjectId(item.id) this.setProjectCode(item.code) this.setProjectName(item.name) localStore.setItem('projectId', item.id) localStore.setItem('projectCode', item.code) localStore.setItem('projectName', item.name) 
this.$router.push({ path: `/projects/${item.code}/index` }) }, /** * Delete Project * @param item Current record * @param i index */ _delete (item, i) { this.deleteProjects({ projectId: item.id }).then(res => { this.$emit('on-update') this.$message.success(res.msg) }).catch(e => { this.$message.error(e.msg || '') }) }, /** * edit project * @param item Current record */ _edit (item) { findComponentDownward(this.$root, 'projects-list')._create(item) } }, watch: { projectsList (a) { this.list = [] setTimeout(() => { this.list = a }) } }, created () { }, mounted () { this.list = this.projectsList }, components: { } } </script>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,170
[Bug] [UI] Delete project error
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened Deleting a project fails with an error. ### What you expected to happen The project should be deleted normally. ### How to reproduce ![Snipaste_2021-09-11_13-08-08](https://user-images.githubusercontent.com/56899730/132937088-0fd626c4-c6f6-42bb-8661-d334b3dfe70f.PNG) ### Anything else dev branch ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6170
https://github.com/apache/dolphinscheduler/pull/6174
5dcfad98492c3e0f2e9f53381428de3d15d60c67
b71f6aea7af899da41bd958caa30d68d89a9aec4
"2021-09-11T05:17:36Z"
java
"2021-09-25T03:58:48Z"
dolphinscheduler-ui/src/js/conf/home/store/projects/actions.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import io from '@/module/io' export default { /** * Get item list */ getProjectsList ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects', payload, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Get project by id */ getProjectById ({ state }, payload) { return new Promise((resolve, reject) => { io.get(`projects/${payload}`, {}, res => { resolve(res.data) }).catch(e => { reject(e) }) }) }, /** * Create project */ createProjects ({ state }, payload) { return new Promise((resolve, reject) => { io.post('projects', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Delete Project */ deleteProjects ({ state }, payload) { return new Promise((resolve, reject) => { io.delete(`projects/${payload}`, {}, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * edit Project */ updateProjects ({ state }, payload) { return new Promise((resolve, reject) => { io.put(`projects/${payload.projectCode}`, payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Task status statistics */ getTaskStatusCount ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/analysis/task-state-count', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * get command state count */ getCommandStateCount ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/analysis/command-state-count', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * get command state count */ getQueueCount ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/analysis/queue-count', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Process status statistics */ getProcessStateCount ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/analysis/process-state-count', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) }, /** * Process definition statistics */ getDefineUserCount ({ state }, payload) { return new Promise((resolve, reject) => { io.get('projects/analysis/define-user-count', payload, res => { resolve(res) }).catch(e => { reject(e) }) }) } }
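Reading the two files of this record together makes the failure mode concrete: `_delete` in list.vue calls `deleteProjects({ projectId: item.id })`, while the `deleteProjects` action above interpolates its payload straight into the URL template (`io.delete(`projects/${payload}`)`), so the DELETE request is issued against `projects/[object Object]` and fails. Judging from the sibling `updateProjects` action, which passes `payload.projectCode` into `projects/${payload.projectCode}`, the delete path presumably needs a scalar project identifier in the same way; the exact contract (id versus code) is settled by the linked fix PR rather than guessed here.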
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,347
[Bug] [Master] the first schedule_time is error in complement data
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The first schedule_time is set incorrectly: command_param: {"complementEndDate":"2021-09-21 00:26:00","complementStartDate":"2021-09-21 00:24:00"} schedule_time: 2021-09-21 00:24:00 cron: 0 * * * * ? * ### What you expected to happen The schedule_time should be the first fire time of the cron expression: schedule_time: 2021-09-21 00:25:00 ### How to reproduce Run a complement with cron 0 * * * * ? * over some date range; you will find errors in the resulting schedule_time values. ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6347
https://github.com/apache/dolphinscheduler/pull/6351
b71f6aea7af899da41bd958caa30d68d89a9aec4
20a3741b249b871ffdf975de111244e6c68f3f07
"2021-09-25T08:58:23Z"
java
"2021-09-26T03:55:57Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/MasterSchedulerService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.runner; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.remote.NettyRemotingClient; import org.apache.dolphinscheduler.remote.config.NettyClientConfig; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; import org.apache.dolphinscheduler.server.master.registry.MasterRegistryClient; import org.apache.dolphinscheduler.server.master.registry.ServerNodeManager; import org.apache.dolphinscheduler.service.alert.ProcessAlertManager; import org.apache.dolphinscheduler.service.process.ProcessService; import java.util.List; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; /** * master scheduler thread */ @Service public class MasterSchedulerService extends Thread { /** * logger of MasterSchedulerService */ private static final Logger logger = LoggerFactory.getLogger(MasterSchedulerService.class); /** * dolphinscheduler database interface */ @Autowired private ProcessService processService; /** * zookeeper master client */ @Autowired private MasterRegistryClient masterRegistryClient; /** * master config */ @Autowired private MasterConfig masterConfig; /** * alert manager */ @Autowired private ProcessAlertManager processAlertManager; /** * netty remoting client */ private NettyRemotingClient nettyRemotingClient; @Autowired NettyExecutorManager nettyExecutorManager; /** * master exec service */ private ThreadPoolExecutor masterExecService; private ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps; ConcurrentHashMap<Integer, ProcessInstance> processTimeoutCheckList = new ConcurrentHashMap<>(); ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList = new ConcurrentHashMap<>(); private StateWheelExecuteThread stateWheelExecuteThread; /** * constructor of MasterSchedulerService */ public void init(ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps) { this.processInstanceExecMaps = processInstanceExecMaps; this.masterExecService = 
(ThreadPoolExecutor) ThreadUtils.newDaemonFixedThreadExecutor("Master-Exec-Thread", masterConfig.getMasterExecThreads()); NettyClientConfig clientConfig = new NettyClientConfig(); this.nettyRemotingClient = new NettyRemotingClient(clientConfig); stateWheelExecuteThread = new StateWheelExecuteThread(processTimeoutCheckList, taskTimeoutCheckList, this.processInstanceExecMaps, masterConfig.getStateWheelInterval() * Constants.SLEEP_TIME_MILLIS); } @Override public synchronized void start() { super.setName("MasterSchedulerService"); super.start(); this.stateWheelExecuteThread.start(); } public void close() { masterExecService.shutdown(); boolean terminated = false; try { terminated = masterExecService.awaitTermination(5, TimeUnit.SECONDS); } catch (InterruptedException ignore) { Thread.currentThread().interrupt(); } if (!terminated) { logger.warn("masterExecService shutdown without terminating; consider increasing the await time"); } nettyRemotingClient.close(); logger.info("master schedule service stopped..."); } /** * run of MasterSchedulerService */ @Override public void run() { logger.info("master scheduler started"); while (Stopper.isRunning()) { try { boolean runCheckFlag = OSUtils.checkResource(masterConfig.getMasterMaxCpuloadAvg(), masterConfig.getMasterReservedMemory()); if (!runCheckFlag) { Thread.sleep(Constants.SLEEP_TIME_MILLIS); continue; } scheduleProcess(); } catch (Exception e) { logger.error("master scheduler thread error", e); } } } /** * 1. get command by slot * 2. do not handle the command if it does not belong to this master's slot * * @throws Exception */ private void scheduleProcess() throws Exception { int activeCount = masterExecService.getActiveCount(); // make sure to scan and delete command table in one transaction Command command = findOneCommand(); if (command != null) { logger.info("find one command: id: {}, type: {}", command.getId(), command.getCommandType()); try { ProcessInstance processInstance = processService.handleCommand(logger, getLocalAddress(), this.masterConfig.getMasterExecThreads() - activeCount, command); if (processInstance != null) { WorkflowExecuteThread workflowExecuteThread = new WorkflowExecuteThread( processInstance , processService , nettyExecutorManager , processAlertManager , masterConfig , taskTimeoutCheckList); this.processInstanceExecMaps.put(processInstance.getId(), workflowExecuteThread); if (processInstance.getTimeout() > 0) { this.processTimeoutCheckList.put(processInstance.getId(), processInstance); } logger.info("command {} process {} start...", command.getId(), processInstance.getId()); masterExecService.execute(workflowExecuteThread); } } catch (Exception e) { logger.error("scan command error", e); processService.moveToErrorCommand(command, e.toString()); } } else { // no command found, sleep for 1s Thread.sleep(Constants.SLEEP_TIME_MILLIS); } } private Command findOneCommand() { int pageNumber = 0; Command result = null; while (Stopper.isRunning()) { if (ServerNodeManager.MASTER_SIZE == 0) { return null; } List<Command> commandList = processService.findCommandPage(ServerNodeManager.MASTER_SIZE, pageNumber); if (commandList.size() == 0) { return null; } for (Command command : commandList) { int slot = ServerNodeManager.getSlot(); if (ServerNodeManager.MASTER_SIZE != 0 && command.getId() % ServerNodeManager.MASTER_SIZE == slot) { result = command; break; } } if (result != null) { logger.info("find command {}, slot:{} :", result.getId(), ServerNodeManager.getSlot()); break; } pageNumber += 1; } return result; } private String getLocalAddress() { return
NetUtils.getAddr(masterConfig.getListenPort()); } }
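The `findOneCommand` loop above implements slot-based partitioning of the command table: each master computes `command.getId() % MASTER_SIZE` and only claims commands that land on its own slot, so concurrently running masters never pick up the same command. A minimal sketch of just that selection rule in isolation (class and method names here are illustrative, not the project's):

```java
import java.util.Arrays;
import java.util.List;
import java.util.Optional;

public class SlotPickSketch {

    /** pick the first command id that belongs to this master's slot */
    static Optional<Integer> findOneCommand(List<Integer> commandIds, int masterSize, int slot) {
        if (masterSize == 0) {
            // no registered masters: nothing can be claimed (and id % 0 would throw)
            return Optional.empty();
        }
        return commandIds.stream()
                .filter(id -> id % masterSize == slot)
                .findFirst();
    }

    public static void main(String[] args) {
        List<Integer> ids = Arrays.asList(7, 8, 9, 10);
        // with two masters, slot 0 claims even ids and slot 1 claims odd ids
        System.out.println(findOneCommand(ids, 2, 0)); // Optional[8]
        System.out.println(findOneCommand(ids, 2, 1)); // Optional[7]
    }
}
```

The zero-size guard mirrors the `ServerNodeManager.MASTER_SIZE == 0` check in the real loop, since the modulo would otherwise throw an ArithmeticException.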
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,347
[Bug] [Master] the first schedule_time is error in complement data
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened The first schedule_time is set incorrectly: command_param: {"complementEndDate":"2021-09-21 00:26:00","complementStartDate":"2021-09-21 00:24:00"} schedule_time: 2021-09-21 00:24:00 cron: 0 * * * * ? * ### What you expected to happen The schedule_time should be the first fire time of the cron expression: schedule_time: 2021-09-21 00:25:00 ### How to reproduce Run a complement with cron 0 * * * * ? * over some date range; you will find errors in the resulting schedule_time values. ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6347
https://github.com/apache/dolphinscheduler/pull/6351
b71f6aea7af899da41bd958caa30d68d89a9aec4
20a3741b249b871ffdf975de111244e6c68f3f07
"2021-09-25T08:58:23Z"
java
"2021-09-26T03:55:57Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.runner; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES; import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP; import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.DependResult; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.Priority; import org.apache.dolphinscheduler.common.enums.StateEvent; import org.apache.dolphinscheduler.common.enums.StateEventType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.thread.ThreadUtils; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.NetUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.command.HostUpdateCommand; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.server.master.config.MasterConfig; import 
org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager; import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor; import org.apache.dolphinscheduler.server.master.runner.task.TaskAction; import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory; import org.apache.dolphinscheduler.service.alert.ProcessAlertManager; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.quartz.cron.CronUtils; import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutorService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.collect.HashBasedTable; import com.google.common.collect.Lists; import com.google.common.collect.Table; /** * master exec thread,split dag */ public class WorkflowExecuteThread implements Runnable { /** * logger of WorkflowExecuteThread */ private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class); /** * runing TaskNode */ private final Map<Integer, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>(); /** * task exec service */ private final ExecutorService taskExecService; /** * process instance */ private ProcessInstance processInstance; /** * submit failure nodes */ private boolean taskFailedSubmit = false; /** * recover node id list */ private List<TaskInstance> recoverNodeIdList = new ArrayList<>(); /** * error task list */ private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>(); /** * complete task list */ private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>(); /** * ready to submit task queue */ private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue(); /** * depend failed task map */ private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>(); /** * forbidden task map */ private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>(); /** * skip task map */ private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>(); /** * recover tolerance fault task list */ private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>(); /** * alert manager */ private ProcessAlertManager processAlertManager; /** * the object of DAG */ private DAG<String, TaskNode, TaskNodeRelation> dag; /** * process service */ private ProcessService processService; /** * master config */ private MasterConfig masterConfig; /** * */ private NettyExecutorManager nettyExecutorManager; private ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>(); private List<Date> complementListDate = Lists.newLinkedList(); private Table<Integer, Long, TaskInstance> taskInstanceHashMap = HashBasedTable.create(); private ProcessDefinition processDefinition; private String key; private ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList; /** * start flag, true: start nodes submit completely * */ private boolean isStart = false; /** * constructor of WorkflowExecuteThread * * @param processInstance processInstance * 
@param processService processService * @param nettyExecutorManager nettyExecutorManager * @param taskTimeoutCheckList taskTimeoutCheckList */ public WorkflowExecuteThread(ProcessInstance processInstance , ProcessService processService , NettyExecutorManager nettyExecutorManager , ProcessAlertManager processAlertManager , MasterConfig masterConfig , ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList) { this.processService = processService; this.processInstance = processInstance; this.masterConfig = masterConfig; int masterTaskExecNum = masterConfig.getMasterExecTaskNum(); this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread", masterTaskExecNum); this.nettyExecutorManager = nettyExecutorManager; this.processAlertManager = processAlertManager; this.taskTimeoutCheckList = taskTimeoutCheckList; } @Override public void run() { try { startProcess(); handleEvents(); } catch (Exception e) { logger.error("handler error:", e); } } /** * the process start nodes are submitted completely. * @return */ public boolean isStart() { return this.isStart; } private void handleEvents() { while (this.stateEvents.size() > 0) { try { StateEvent stateEvent = this.stateEvents.peek(); if (stateEventHandler(stateEvent)) { this.stateEvents.remove(stateEvent); } } catch (Exception e) { logger.error("state handle error:", e); } } } public String getKey() { if (StringUtils.isNotEmpty(key) || this.processDefinition == null) { return key; } // String.format takes %-style conversions, not slf4j-style {} placeholders key = String.format("%d_%d_%d", this.processDefinition.getCode(), this.processDefinition.getVersion(), this.processInstance.getId()); return key; } public boolean addStateEvent(StateEvent stateEvent) { if (processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.info("state event would be abandoned :{}", stateEvent.toString()); return false; } this.stateEvents.add(stateEvent); return true; } public int eventSize() { return this.stateEvents.size(); } public ProcessInstance getProcessInstance() { return this.processInstance; } private boolean stateEventHandler(StateEvent stateEvent) { logger.info("process event: {}", stateEvent.toString()); if (!checkStateEvent(stateEvent)) { return false; } boolean result = false; switch (stateEvent.getType()) { case PROCESS_STATE_CHANGE: result = processStateChangeHandler(stateEvent); break; case TASK_STATE_CHANGE: result = taskStateChangeHandler(stateEvent); break; case PROCESS_TIMEOUT: result = processTimeout(); break; case TASK_TIMEOUT: result = taskTimeout(stateEvent); break; default: break; } if (result) { this.stateEvents.remove(stateEvent); } return result; } private boolean taskTimeout(StateEvent stateEvent) { // nothing to do when the task instance is unknown to this workflow; without the negation the row lookup below would throw NoSuchElementException if (!taskInstanceHashMap.containsRow(stateEvent.getTaskInstanceId())) { return true; } TaskInstance taskInstance = taskInstanceHashMap .row(stateEvent.getTaskInstanceId()) .values() .iterator().next(); if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) { return true; } TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy(); if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) { ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); taskProcessor.action(TaskAction.TIMEOUT); return false; } else { processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine()); return true; } } private boolean processTimeout() { this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition); return true; } private boolean
taskStateChangeHandler(StateEvent stateEvent) { TaskInstance task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); if (stateEvent.getExecutionStatus().typeIsFinished()) { taskFinished(task); } else if (activeTaskProcessorMaps.containsKey(stateEvent.getTaskInstanceId())) { ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId()); iTaskProcessor.run(); if (iTaskProcessor.taskState().typeIsFinished()) { task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId()); taskFinished(task); } } else { logger.error("state handler error: {}", stateEvent.toString()); } return true; } private void taskFinished(TaskInstance task) { logger.info("work flow {} task {} state:{} ", processInstance.getId(), task.getId(), task.getState()); if (task.taskCanRetry()) { addTaskToStandByList(task); return; } ProcessInstance processInstance = processService.findProcessInstanceById(this.processInstance.getId()); completeTaskList.put(task.getName(), task); activeTaskProcessorMaps.remove(task.getId()); taskTimeoutCheckList.remove(task.getId()); if (task.getState().typeIsSuccess()) { processInstance.setVarPool(task.getVarPool()); processService.saveProcessInstance(processInstance); submitPostNode(task.getName()); } else if (task.getState().typeIsFailure()) { if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) { submitPostNode(task.getName()); } else { errorTaskList.put(task.getName(), task); if (processInstance.getFailureStrategy() == FailureStrategy.END) { killAllTasks(); } } } this.updateProcessInstanceState(); } private boolean checkStateEvent(StateEvent stateEvent) { if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) { logger.error("mismatch process instance id: {}, state event:{}", this.processInstance.getId(), stateEvent.toString()); return false; } return true; } private boolean processStateChangeHandler(StateEvent stateEvent) { try { logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus()); processInstance = processService.findProcessInstanceById(this.processInstance.getId()); if (processComplementData()) { return true; } if (stateEvent.getExecutionStatus().typeIsFinished()) { endProcess(); } if (stateEvent.getExecutionStatus() == ExecutionStatus.READY_STOP) { killAllTasks(); } return true; } catch (Exception e) { logger.error("process state change error:", e); } return true; } private boolean processComplementData() throws Exception { if (!needComplementProcess()) { return false; } Date scheduleDate = processInstance.getScheduleTime(); if (scheduleDate == null) { scheduleDate = complementListDate.get(0); } else if (processInstance.getState().typeIsFinished()) { endProcess(); int index = complementListDate.indexOf(scheduleDate); if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) { // complement data ends || no success return false; } scheduleDate = complementListDate.get(index + 1); //the next process complement processInstance.setId(0); } processInstance.setScheduleTime(scheduleDate); Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); 
processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processService.saveProcessInstance(processInstance); this.taskInstanceHashMap.clear(); startProcess(); return true; } private boolean needComplementProcess() { if (processInstance.isComplementData() && Flag.NO == processInstance.getIsSubProcess()) { return true; } return false; } private void startProcess() throws Exception { if (this.taskInstanceHashMap.size() == 0) { isStart = false; buildFlowDag(); initTaskQueue(); submitPostNode(null); isStart = true; } } /** * process end handle */ private void endProcess() { this.stateEvents.clear(); processInstance.setEndTime(new Date()); processService.updateProcessInstance(processInstance); if (processInstance.getState().typeIsWaitingThread()) { processService.createRecoveryWaitingThreadCommand(null, processInstance); } List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId()); ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId()); processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser); } /** * generate process dag * * @throws Exception exception */ private void buildFlowDag() throws Exception { if (this.dag != null) { return; } processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam()); List<TaskNode> taskNodeList = processService.transformTask(processService.findRelationByCode(processDefinition.getProjectCode(), processDefinition.getCode()), Lists.newArrayList()); forbiddenTaskList.clear(); taskNodeList.forEach(taskNode -> { if (taskNode.isForbidden()) { forbiddenTaskList.put(taskNode.getName(), taskNode); } }); // generate process to get DAG info List<String> recoveryNameList = getRecoveryNodeNameList(); List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam()); ProcessDag processDag = generateFlowDag(taskNodeList, startNodeNameList, recoveryNameList, processInstance.getTaskDependType()); if (processDag == null) { logger.error("processDag is null"); return; } // generate process dag dag = DagHelper.buildDagGraph(processDag); } /** * init task queue */ private void initTaskQueue() { taskFailedSubmit = false; activeTaskProcessorMaps.clear(); dependFailedTask.clear(); completeTaskList.clear(); errorTaskList.clear(); List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance task : taskInstanceList) { if (task.isTaskComplete()) { completeTaskList.put(task.getName(), task); } if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) { continue; } if (task.getState().typeIsFailure() && !task.taskCanRetry()) { errorTaskList.put(task.getName(), task); } } if (complementListDate.size() == 0 && needComplementProcess()) { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date startDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); Date endDate = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); if (startDate.after(endDate)) { Date tmp = startDate; startDate = 
endDate; endDate = tmp; } List<Schedule> schedules = processService.queryReleaseSchedulerListByProcessDefinitionCode(processInstance.getProcessDefinitionCode()); complementListDate.addAll(CronUtils.getSelfFireDateList(startDate, endDate, schedules)); logger.info(" process definition code:{} complement data: {}", processInstance.getProcessDefinitionCode(), complementListDate.toString()); } } /** * submit task to execute * * @param taskInstance task instance * @return TaskInstance */ private TaskInstance submitTaskExec(TaskInstance taskInstance) { try { ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType()); if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION && taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) { notifyProcessHostUpdate(taskInstance); } boolean submit = taskProcessor.submit(taskInstance, processInstance, masterConfig.getMasterTaskCommitRetryTimes(), masterConfig.getMasterTaskCommitInterval()); if (submit) { this.taskInstanceHashMap.put(taskInstance.getId(), taskInstance.getTaskCode(), taskInstance); activeTaskProcessorMaps.put(taskInstance.getId(), taskProcessor); taskProcessor.run(); addTimeoutCheck(taskInstance); TaskDefinition taskDefinition = processService.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); taskInstance.setTaskDefine(taskDefinition); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); this.stateEvents.add(stateEvent); } return taskInstance; } else { logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!", processInstance.getId(), processInstance.getName(), taskInstance.getId(), taskInstance.getName()); return null; } } catch (Exception e) { logger.error("submit standby task error", e); return null; } } private void notifyProcessHostUpdate(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getHost())) { return; } try { HostUpdateCommand hostUpdateCommand = new HostUpdateCommand(); hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort())); hostUpdateCommand.setTaskInstanceId(taskInstance.getId()); Host host = new Host(taskInstance.getHost()); nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command()); } catch (Exception e) { logger.error("notify process host update", e); } } private void addTimeoutCheck(TaskInstance taskInstance) { TaskDefinition taskDefinition = processService.findTaskDefinition( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion() ); taskInstance.setTaskDefine(taskDefinition); if (TimeoutFlag.OPEN == taskDefinition.getTimeoutFlag()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); return; } if (taskInstance.isDependTask() || taskInstance.isSubProcess()) { this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance); } } /** * find task instance in db. * in case submit more than one same name task in the same time. 
* * @param taskCode task code * @param taskVersion task version * @return TaskInstance */ private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) { List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) { return taskInstance; } } return null; } /** * encapsulation task * * @param processInstance process instance * @param taskNode taskNode * @return TaskInstance */ private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) { TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion()); if (taskInstance == null) { taskInstance = new TaskInstance(); taskInstance.setTaskCode(taskNode.getCode()); taskInstance.setTaskDefinitionVersion(taskNode.getVersion()); // task name taskInstance.setName(taskNode.getName()); // task instance state taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); // process instance id taskInstance.setProcessInstanceId(processInstance.getId()); // task instance type taskInstance.setTaskType(taskNode.getType().toUpperCase()); // task instance whether alert taskInstance.setAlertFlag(Flag.NO); // task instance start time taskInstance.setStartTime(null); // task instance flag taskInstance.setFlag(Flag.YES); // task instance retry times taskInstance.setRetryTimes(0); // max task instance retry times taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes()); // retry task instance interval taskInstance.setRetryInterval(taskNode.getRetryInterval()); //set task param taskInstance.setTaskParams(taskNode.getTaskParams()); // task instance priority if (taskNode.getTaskInstancePriority() == null) { taskInstance.setTaskInstancePriority(Priority.MEDIUM); } else { taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority()); } String processWorkerGroup = processInstance.getWorkerGroup(); processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup; String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup(); Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode(); Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? 
processEnvironmentCode : taskNode.getEnvironmentCode(); if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) { taskInstance.setWorkerGroup(processWorkerGroup); taskInstance.setEnvironmentCode(processEnvironmentCode); } else { taskInstance.setWorkerGroup(taskWorkerGroup); taskInstance.setEnvironmentCode(taskEnvironmentCode); } if (!taskInstance.getEnvironmentCode().equals(-1L)) { Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode()); if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) { taskInstance.setEnvironmentConfig(environment.getConfig()); } } // delay execution time taskInstance.setDelayTime(taskNode.getDelayTime()); } return taskInstance; } public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) { Map<String, Property> allProperty = new HashMap<>(); Map<String, TaskInstance> allTaskInstance = new HashMap<>(); if (CollectionUtils.isNotEmpty(preTask)) { for (String preTaskName : preTask) { TaskInstance preTaskInstance = completeTaskList.get(preTaskName); if (preTaskInstance == null) { continue; } String preVarPool = preTaskInstance.getVarPool(); if (StringUtils.isNotEmpty(preVarPool)) { List<Property> properties = JSONUtils.toList(preVarPool, Property.class); for (Property info : properties) { setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info); } } } if (allProperty.size() > 0) { taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values())); } } } private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) { //for this taskInstance all the param in this part is IN. thisProperty.setDirect(Direct.IN); //get the pre taskInstance Property's name String proName = thisProperty.getProp(); //if the Previous nodes have the Property of same name if (allProperty.containsKey(proName)) { //comparison the value of two Property Property otherPro = allProperty.get(proName); //if this property'value of loop is empty,use the other,whether the other's value is empty or not if (StringUtils.isEmpty(thisProperty.getValue())) { allProperty.put(proName, otherPro); //if property'value of loop is not empty,and the other's value is not empty too, use the earlier value } else if (StringUtils.isNotEmpty(otherPro.getValue())) { TaskInstance otherTask = allTaskInstance.get(proName); if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } else { allProperty.put(proName, otherPro); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } else { allProperty.put(proName, thisProperty); allTaskInstance.put(proName, preTaskInstance); } } private void submitPostNode(String parentNodeName) { Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeName, skipTaskNodeList, dag, completeTaskList); List<TaskInstance> taskInstances = new ArrayList<>(); for (String taskNode : submitTaskNodeList) { TaskNode taskNodeObject = dag.getNode(taskNode); if (taskInstanceHashMap.containsColumn(taskNodeObject.getCode())) { continue; } TaskInstance task = createTaskInstance(processInstance, taskNodeObject); taskInstances.add(task); } // if previous node success , post node submit for (TaskInstance task : taskInstances) { if (readyToSubmitTaskQueue.contains(task)) { continue; } if 
(completeTaskList.containsKey(task.getName())) { logger.info("task {} has already run success", task.getName()); continue; } if (task.getState().typeIsPause() || task.getState().typeIsCancel()) { logger.info("task {} stopped, the state is {}", task.getName(), task.getState()); } else { addTaskToStandByList(task); } } submitStandByTask(); updateProcessInstanceState(); } /** * determine whether the dependencies of the task node are complete * * @return DependResult */ private DependResult isTaskDepsComplete(String taskName) { Collection<String> startNodes = dag.getBeginNode(); // if vertex,returns true directly if (startNodes.contains(taskName)) { return DependResult.SUCCESS; } TaskNode taskNode = dag.getNode(taskName); List<String> depNameList = taskNode.getDepList(); for (String depsNode : depNameList) { if (!dag.containsNode(depsNode) || forbiddenTaskList.containsKey(depsNode) || skipTaskNodeList.containsKey(depsNode)) { continue; } // dependencies must be fully completed if (!completeTaskList.containsKey(depsNode)) { return DependResult.WAITING; } ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState(); if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) { return DependResult.NON_EXEC; } // ignore task state if current task is condition if (taskNode.isConditionsTask()) { continue; } if (!dependTaskSuccess(depsNode, taskName)) { return DependResult.FAILED; } } logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray())); return DependResult.SUCCESS; } /** * depend node is completed, but here need check the condition task branch is the next node */ private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) { if (dag.getNode(dependNodeName).isConditionsTask()) { //condition task need check the branch to run List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, completeTaskList); if (!nextTaskList.contains(nextNodeName)) { return false; } } else { ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState(); if (depTaskState.typeIsFailure()) { return false; } } return true; } /** * query task instance by complete state * * @param state state * @return task instance list */ private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) { List<TaskInstance> resultList = new ArrayList<>(); for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) { if (entry.getValue().getState() == state) { resultList.add(entry.getValue()); } } return resultList; } /** * where there are ongoing tasks * * @param state state * @return ExecutionStatus */ private ExecutionStatus runningState(ExecutionStatus state) { if (state == ExecutionStatus.READY_STOP || state == ExecutionStatus.READY_PAUSE || state == ExecutionStatus.WAITING_THREAD || state == ExecutionStatus.DELAY_EXECUTION) { // if the running task is not completed, the state remains unchanged return state; } else { return ExecutionStatus.RUNNING_EXECUTION; } } /** * exists failure task,contains submit failure、dependency failure,execute failure(retry after) * * @return Boolean whether has failed task */ private boolean hasFailedTask() { if (this.taskFailedSubmit) { return true; } if (this.errorTaskList.size() > 0) { return true; } return this.dependFailedTask.size() > 0; } /** * process instance failure * * @return Boolean whether process instance failed */ private boolean processFailed() { if (hasFailedTask()) { if (processInstance.getFailureStrategy() == 
FailureStrategy.END) { return true; } if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) { return readyToSubmitTaskQueue.size() == 0 || activeTaskProcessorMaps.size() == 0; } } return false; } /** * whether there is a task waiting for a thread * * @return Boolean whether a waiting-thread task exists */ private boolean hasWaitingThreadTask() { List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD); return CollectionUtils.isNotEmpty(waitingList); } /** * prepare for pause * 1. a failed retry task in the preparation queue returns failure directly * 2. a pause task exists, the complement is not completed, or tasks are pending submission: return pause * 3. otherwise success * * @return ExecutionStatus */ private ExecutionStatus processReadyPause() { if (hasRetryTaskInStandBy()) { return ExecutionStatus.FAILURE; } List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE); if (CollectionUtils.isNotEmpty(pauseList) || !isComplementEnd() || readyToSubmitTaskQueue.size() > 0) { return ExecutionStatus.PAUSE; } else { return ExecutionStatus.SUCCESS; } } /** * generate the latest process instance status from the tasks' states * * @param instance process instance * @return process instance execution status */ private ExecutionStatus getProcessInstanceState(ProcessInstance instance) { ExecutionStatus state = instance.getState(); if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) { // active tasks or retry tasks exist return runningState(state); } // process failure if (processFailed()) { return ExecutionStatus.FAILURE; } // waiting thread if (hasWaitingThreadTask()) { return ExecutionStatus.WAITING_THREAD; } // pause if (state == ExecutionStatus.READY_PAUSE) { return processReadyPause(); } // stop if (state == ExecutionStatus.READY_STOP) { List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP); List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL); if (CollectionUtils.isNotEmpty(stopList) || CollectionUtils.isNotEmpty(killList) || !isComplementEnd()) { return ExecutionStatus.STOP; } else { return ExecutionStatus.SUCCESS; } } // success if (state == ExecutionStatus.RUNNING_EXECUTION) { List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL); if (readyToSubmitTaskQueue.size() > 0) { // tasks are currently pending submission with no retries, indicating that a dependency is waiting to complete return ExecutionStatus.RUNNING_EXECUTION; } else if (CollectionUtils.isNotEmpty(killTasks)) { // tasks may have been killed manually return ExecutionStatus.FAILURE; } else { // if the waiting queue is empty and the status is in progress, then success return ExecutionStatus.SUCCESS; } } return state; } /** * whether the complement has ended * * @return Boolean whether the complement has ended */ private boolean isComplementEnd() { if (!processInstance.isComplementData()) { return true; } try { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); return processInstance.getScheduleTime().equals(endTime); } catch (Exception e) { logger.error("complement end failed ", e); return false; } } /** * update the process instance state * after each batch of tasks is executed, the status of the process instance is updated */ private void updateProcessInstanceState() { ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId()); ExecutionStatus state = getProcessInstanceState(instance); if (processInstance.getState() !=
state) { logger.info( "work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}", processInstance.getId(), processInstance.getName(), processInstance.getState(), state, processInstance.getCommandType()); instance.setState(state); processService.updateProcessInstance(instance); processInstance = instance; StateEvent stateEvent = new StateEvent(); stateEvent.setExecutionStatus(processInstance.getState()); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE); this.processStateChangeHandler(stateEvent); } } /** * get task dependency result * * @param taskInstance task instance * @return DependResult */ private DependResult getDependResultForTask(TaskInstance taskInstance) { return isTaskDepsComplete(taskInstance.getName()); } /** * add a task to the standby list * * @param taskInstance task instance */ private void addTaskToStandByList(TaskInstance taskInstance) { logger.info("add task to stand by list: {}", taskInstance.getName()); try { readyToSubmitTaskQueue.put(taskInstance); } catch (Exception e) { logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e); } } /** * remove a task from the standby list * * @param taskInstance task instance */ private void removeTaskFromStandbyList(TaskInstance taskInstance) { logger.info("remove task from stand by list, id: {} name:{}", taskInstance.getId(), taskInstance.getName()); try { readyToSubmitTaskQueue.remove(taskInstance); } catch (Exception e) { logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}", taskInstance.getId(), taskInstance.getName(), e); } } /** * whether a retry task exists in the standby list * * @return Boolean whether a retry task exists in the standby list */ private boolean hasRetryTaskInStandBy() { for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) { if (iter.next().getState().typeIsFailure()) { return true; } } return false; } /** * close the ongoing tasks */ private void killAllTasks() { logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(), activeTaskProcessorMaps.size()); for (int taskId : activeTaskProcessorMaps.keySet()) { TaskInstance taskInstance = processService.findTaskInstanceById(taskId); if (taskInstance == null || taskInstance.getState().typeIsFinished()) { continue; } ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskId); taskProcessor.action(TaskAction.STOP); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); this.addStateEvent(stateEvent); } } } public boolean workFlowFinish() { return this.processInstance.getState().typeIsFinished(); } /** * whether the retry interval has elapsed * * @param taskInstance task instance * @return Boolean */ private boolean retryTaskIntervalOverTime(TaskInstance taskInstance) { if (taskInstance.getState() != ExecutionStatus.FAILURE) { return true; } if (taskInstance.getId() == 0 || taskInstance.getMaxRetryTimes() == 0 || taskInstance.getRetryInterval() == 0) { return true; } Date now = new Date(); long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime()); // return false while the retry interval has not yet elapsed return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT <
failedTimeInterval; } /** * handle the list of tasks to be submitted */ private void submitStandByTask() { try { int length = readyToSubmitTaskQueue.size(); for (int i = 0; i < length; i++) { TaskInstance task = readyToSubmitTaskQueue.peek(); if (task == null) { continue; } // stop tasks which are retrying if forced success happens if (task.taskCanRetry()) { TaskInstance retryTask = processService.findTaskInstanceById(task.getId()); if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) { task.setState(retryTask.getState()); logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName()); removeTaskFromStandbyList(task); completeTaskList.put(task.getName(), task); submitPostNode(task.getName()); continue; } } // init varPool only when this task is running for the first time if (task.isFirstRun()) { // get the pre tasks and merge all their varPools into this task Set<String> preTask = dag.getPreviousNodes(task.getName()); getPreVarPool(task, preTask); } DependResult dependResult = getDependResultForTask(task); if (DependResult.SUCCESS == dependResult) { if (retryTaskIntervalOverTime(task)) { TaskInstance taskInstance = submitTaskExec(task); if (taskInstance == null) { this.taskFailedSubmit = true; } else { removeTaskFromStandbyList(task); } } } else if (DependResult.FAILED == dependResult) { // if the dependency fails, the current node is not submitted and the state changes to failure. dependFailedTask.put(task.getName(), task); removeTaskFromStandbyList(task); logger.info("task {}, id:{} depend result : {}", task.getName(), task.getId(), dependResult); } else if (DependResult.NON_EXEC == dependResult) { // for some reason (depend task pause/stop) this task would not be submitted removeTaskFromStandbyList(task); logger.info("remove task {}, id:{}, because depend result : {}", task.getName(), task.getId(), dependResult); } } } catch (Exception e) { logger.error("submit standby task error", e); } } /** * get recovery task instance * * @param taskId task id * @return recovery task instance */ private TaskInstance getRecoveryTaskInstance(String taskId) { if (StringUtils.isEmpty(taskId)) { return null; } try { Integer intId = Integer.valueOf(taskId); TaskInstance task = processService.findTaskInstanceById(intId); if (task == null) { logger.error("start node id cannot be found: {}", taskId); } else { return task; } } catch (Exception e) { logger.error("get recovery task instance failed ", e); } return null; } /** * get start task instance list * * @param cmdParam command param * @return task instance list */ private List<TaskInstance> getStartTaskInstanceList(String cmdParam) { List<TaskInstance> instanceList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) { String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA); for (String nodeId : idList) { TaskInstance task = getRecoveryTaskInstance(nodeId); if (task != null) { instanceList.add(task); } } } return instanceList; } /** * parse "StartNodeNameList" from cmd param * * @param cmdParam command param * @return start node name list */ private List<String> parseStartNodeName(String cmdParam) { List<String> startNodeNameList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap == null) { return startNodeNameList; } if (paramMap.containsKey(CMD_PARAM_START_NODE_NAMES)) { startNodeNameList =
Arrays.asList(paramMap.get(CMD_PARAM_START_NODE_NAMES).split(Constants.COMMA)); } return startNodeNameList; } /** * generate the recovery node name list; * the names are taken from the task instances recovered via the command param * * @return recovery node name list */ private List<String> getRecoveryNodeNameList() { List<String> recoveryNodeNameList = new ArrayList<>(); if (CollectionUtils.isNotEmpty(recoverNodeIdList)) { for (TaskInstance task : recoverNodeIdList) { recoveryNodeNameList.add(task.getName()); } } return recoveryNodeNameList; } /** * generate flow dag * * @param totalTaskNodeList total task node list * @param startNodeNameList start node name list * @param recoveryNodeNameList recovery node name list * @param depNodeType depend node type * @return ProcessDag process dag * @throws Exception exception */ public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList, List<String> startNodeNameList, List<String> recoveryNodeNameList, TaskDependType depNodeType) throws Exception { return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeNameList, depNodeType); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,347
[Bug] [Master] the first schedule_time is error in complement data
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened schedule_time is set incorrectly: command_param: {"complementEndDate":"2021-09-21 00:26:00","complementStartDate":"2021-09-21 00:24:00"} schedule_time: 2021-09-21 00:24:00 cron: 0 * * * * ? * ### What you expected to happen the schedule_time should be set correctly: schedule_time: 2021-09-21 00:25:00 ### How to reproduce complement data with cron: 0 * * * * ? * complement data over some date range, and you will find errors in schedule_time. ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
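The expectation here is that the first schedule_time should be the first cron fire time inside the complement window rather than the raw complementStartDate. A minimal sketch of that expectation, assuming Quartz's CronExpression is available (the class ComplementScheduleTimeSketch and its helper are hypothetical illustrations, not the actual fix shipped in the linked PR):

```java
import java.text.ParseException;
import java.util.Date;

import org.quartz.CronExpression;

public class ComplementScheduleTimeSketch {

    /**
     * Return the first cron fire time inside [start, end], or null when the
     * window contains no fire time. For start = 2021-09-21 00:24:00 and the
     * cron "0 * * * * ? *", this yields 2021-09-21 00:25:00, matching the
     * schedule_time the issue expects.
     */
    public static Date firstScheduleTime(Date start, Date end, String cron) throws ParseException {
        CronExpression expression = new CronExpression(cron);
        // getNextValidTimeAfter is strictly "after", so the raw window start
        // (00:24:00) is skipped in favor of the next matching tick (00:25:00)
        Date next = expression.getNextValidTimeAfter(start);
        return (next != null && !next.after(end)) ? next : null;
    }
}
```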
https://github.com/apache/dolphinscheduler/issues/6347
https://github.com/apache/dolphinscheduler/pull/6351
b71f6aea7af899da41bd958caa30d68d89a9aec4
20a3741b249b871ffdf975de111244e6c68f3f07
"2021-09-25T08:58:23Z"
java
"2021-09-26T03:55:57Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.process; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE; import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_EMPTY_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_FATHER_PARAMS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID; import static org.apache.dolphinscheduler.common.Constants.LOCAL_PARAMS; import static org.apache.dolphinscheduler.common.Constants.YYYY_MM_DD_HH_MM_SS; import static java.util.stream.Collectors.toSet; import com.fasterxml.jackson.core.type.TypeReference; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Direct; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.FailureStrategy; import org.apache.dolphinscheduler.common.enums.Flag; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.enums.TaskDependType; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.common.enums.WarningType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.DateInterval; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.process.ProcessDag; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.process.ResourceInfo; import org.apache.dolphinscheduler.common.task.AbstractParameters; import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter; import org.apache.dolphinscheduler.common.task.subprocess.SubProcessParameters; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.ParameterUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import 
org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.common.utils.TaskParametersUtils; import org.apache.dolphinscheduler.dao.entity.Command; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.DataSource; import org.apache.dolphinscheduler.dao.entity.Environment; import org.apache.dolphinscheduler.dao.entity.ErrorCommand; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessInstanceMap; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.ProjectUser; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.CommandMapper; import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper; import org.apache.dolphinscheduler.dao.mapper.EnvironmentMapper; import org.apache.dolphinscheduler.dao.mapper.ErrorCommandMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.DagHelper; import org.apache.dolphinscheduler.remote.utils.Host; import org.apache.dolphinscheduler.service.log.LogClientService; import org.apache.commons.lang.StringUtils; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; import 
com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.facebook.presto.jdbc.internal.guava.collect.Lists; import com.fasterxml.jackson.databind.node.ObjectNode; /** * process relative dao that some mappers in this. */ @Component public class ProcessService { private final Logger logger = LoggerFactory.getLogger(getClass()); private final int[] stateArray = new int[]{ExecutionStatus.SUBMITTED_SUCCESS.ordinal(), ExecutionStatus.RUNNING_EXECUTION.ordinal(), ExecutionStatus.DELAY_EXECUTION.ordinal(), ExecutionStatus.READY_PAUSE.ordinal(), ExecutionStatus.READY_STOP.ordinal()}; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionMapper processDefineMapper; @Autowired private ProcessDefinitionLogMapper processDefineLogMapper; @Autowired private ProcessInstanceMapper processInstanceMapper; @Autowired private DataSourceMapper dataSourceMapper; @Autowired private ProcessInstanceMapMapper processInstanceMapMapper; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private CommandMapper commandMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private UdfFuncMapper udfFuncMapper; @Autowired private ResourceMapper resourceMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ErrorCommandMapper errorCommandMapper; @Autowired private TenantMapper tenantMapper; @Autowired private ProjectMapper projectMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired private EnvironmentMapper environmentMapper; /** * handle Command (construct ProcessInstance from Command) , wrapped in transaction * * @param logger logger * @param host host * @param validThreadNum validThreadNum * @param command found command * @return process instance */ @Transactional(rollbackFor = Exception.class) public ProcessInstance handleCommand(Logger logger, String host, int validThreadNum, Command command) { ProcessInstance processInstance = constructProcessInstance(command, host); // cannot construct process instance, return null if (processInstance == null) { logger.error("scan command, command parameter is error: {}", command); moveToErrorCommand(command, "process instance is null"); return null; } if (!checkThreadNum(command, validThreadNum)) { logger.info("there is not enough thread for this command: {}", command); return setWaitingThreadProcess(command, processInstance); } processInstance.setCommandType(command.getCommandType()); processInstance.addHistoryCmd(command.getCommandType()); saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); this.commandMapper.deleteById(command.getId()); return processInstance; } /** * save error command, and delete original command * * @param command command * @param message message */ @Transactional(rollbackFor = Exception.class) public void moveToErrorCommand(Command command, String message) { ErrorCommand errorCommand = new ErrorCommand(command, message); this.errorCommandMapper.insert(errorCommand); this.commandMapper.deleteById(command.getId()); } /** * set process waiting thread * * @param command command * @param processInstance processInstance * @return process instance */ private ProcessInstance setWaitingThreadProcess(Command command, ProcessInstance processInstance) { 
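// park the instance in WAITING_THREAD state and register a recovery command so it can be re-triggered once threads free up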
processInstance.setState(ExecutionStatus.WAITING_THREAD); if (command.getCommandType() != CommandType.RECOVER_WAITING_THREAD) { processInstance.addHistoryCmd(command.getCommandType()); } saveProcessInstance(processInstance); this.setSubProcessParam(processInstance); createRecoveryWaitingThreadCommand(command, processInstance); return null; } /** * check thread num * * @param command command * @param validThreadNum validThreadNum * @return if thread is enough */ private boolean checkThreadNum(Command command, int validThreadNum) { int commandThreadCount = this.workProcessThreadNumCount(command.getProcessDefinitionCode()); return validThreadNum >= commandThreadCount; } /** * insert one command * * @param command command * @return create result */ public int createCommand(Command command) { int result = 0; if (command != null) { result = commandMapper.insert(command); } return result; } /** * find one command from queue list * * @return command */ public Command findOneCommand() { return commandMapper.getOneToRun(); } /** * get command page * * @param pageSize * @param pageNumber * @return */ public List<Command> findCommandPage(int pageSize, int pageNumber) { Page<Command> commandPage = new Page<>(pageNumber, pageSize); return commandMapper.queryCommandPage(commandPage).getRecords(); } /** * check the input command exists in queue list * * @param command command * @return create command result */ public boolean verifyIsNeedCreateCommand(Command command) { boolean isNeedCreate = true; EnumMap<CommandType, Integer> cmdTypeMap = new EnumMap<>(CommandType.class); cmdTypeMap.put(CommandType.REPEAT_RUNNING, 1); cmdTypeMap.put(CommandType.RECOVER_SUSPENDED_PROCESS, 1); cmdTypeMap.put(CommandType.START_FAILURE_TASK_PROCESS, 1); CommandType commandType = command.getCommandType(); if (cmdTypeMap.containsKey(commandType)) { ObjectNode cmdParamObj = JSONUtils.parseObject(command.getCommandParam()); int processInstanceId = cmdParamObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt(); List<Command> commands = commandMapper.selectList(null); // for all commands for (Command tmpCommand : commands) { if (cmdTypeMap.containsKey(tmpCommand.getCommandType())) { ObjectNode tempObj = JSONUtils.parseObject(tmpCommand.getCommandParam()); if (tempObj != null && processInstanceId == tempObj.path(CMD_PARAM_RECOVER_PROCESS_ID_STRING).asInt()) { isNeedCreate = false; break; } } } } return isNeedCreate; } /** * find process instance detail by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceDetailById(int processId) { return processInstanceMapper.queryDetailById(processId); } /** * get task node list by definitionId */ public List<TaskDefinition> getTaskNodeListByDefinitionId(Integer defineId) { ProcessDefinition processDefinition = processDefineMapper.selectById(defineId); if (processDefinition == null) { logger.error("process define not exists"); return new ArrayList<>(); } List<ProcessTaskRelationLog> processTaskRelations = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelationLog processTaskRelation : processTaskRelations) { if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } List<TaskDefinitionLog> taskDefinitionLogs = taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); return 
new ArrayList<>(taskDefinitionLogs); } /** * find process instance by id * * @param processId processId * @return process instance */ public ProcessInstance findProcessInstanceById(int processId) { return processInstanceMapper.selectById(processId); } /** * find process define by id. * * @param processDefinitionId processDefinitionId * @return process definition */ public ProcessDefinition findProcessDefineById(int processDefinitionId) { return processDefineMapper.selectById(processDefinitionId); } /** * find process define by code and version. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinition(Long processDefinitionCode, int version) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); if (processDefinition == null || processDefinition.getVersion() != version) { processDefinition = processDefineLogMapper.queryByDefinitionCodeAndVersion(processDefinitionCode, version); if (processDefinition != null) { processDefinition.setId(0); } } return processDefinition; } /** * find process define by code. * * @param processDefinitionCode processDefinitionCode * @return process definition */ public ProcessDefinition findProcessDefinitionByCode(Long processDefinitionCode) { return processDefineMapper.queryByCode(processDefinitionCode); } /** * delete work process instance by id * * @param processInstanceId processInstanceId * @return delete process instance result */ public int deleteWorkProcessInstanceById(int processInstanceId) { return processInstanceMapper.deleteById(processInstanceId); } /** * delete all sub process by parent instance id * * @param processInstanceId processInstanceId * @return delete all sub process instance result */ public int deleteAllSubWorkProcessByParentId(int processInstanceId) { List<Integer> subProcessIdList = processInstanceMapMapper.querySubIdListByParentId(processInstanceId); for (Integer subId : subProcessIdList) { deleteAllSubWorkProcessByParentId(subId); deleteWorkProcessMapByParentId(subId); removeTaskLogFile(subId); deleteWorkProcessInstanceById(subId); } return 1; } /** * remove task log file * * @param processInstanceId processInstanceId */ public void removeTaskLogFile(Integer processInstanceId) { List<TaskInstance> taskInstanceList = findValidTaskListByProcessId(processInstanceId); if (CollectionUtils.isEmpty(taskInstanceList)) { return; } try (LogClientService logClient = new LogClientService()) { for (TaskInstance taskInstance : taskInstanceList) { String taskLogPath = taskInstance.getLogPath(); if (StringUtils.isEmpty(taskInstance.getHost())) { continue; } int port = Constants.RPC_PORT; String ip = ""; try { ip = Host.of(taskInstance.getHost()).getIp(); } catch (Exception e) { // compatible old version ip = taskInstance.getHost(); } // remove task log from loggerserver logClient.removeTaskLog(ip, port, taskLogPath); } } } /** * calculate sub process number in the process define. * * @param processDefinitionCode processDefinitionCode * @return process thread num count */ private Integer workProcessThreadNumCount(long processDefinitionCode) { ProcessDefinition processDefinition = processDefineMapper.queryByCode(processDefinitionCode); List<Integer> ids = new ArrayList<>(); recurseFindSubProcessId(processDefinition.getId(), ids); return ids.size() + 1; } /** * recursive query sub process definition id by parent id. 
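* used by workProcessThreadNumCount to count how many threads a process and its nested sub-processes will occupy.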
* * @param parentId parentId * @param ids ids */ public void recurseFindSubProcessId(int parentId, List<Integer> ids) { List<TaskDefinition> taskNodeList = this.getTaskNodeListByDefinitionId(parentId); if (taskNodeList != null && !taskNodeList.isEmpty()) { for (TaskDefinition taskNode : taskNodeList) { String parameter = taskNode.getTaskParams(); ObjectNode parameterJson = JSONUtils.parseObject(parameter); if (parameterJson.get(CMD_PARAM_SUB_PROCESS_DEFINE_ID) != null) { SubProcessParameters subProcessParam = JSONUtils.parseObject(parameter, SubProcessParameters.class); ids.add(subProcessParam.getProcessDefinitionId()); recurseFindSubProcessId(subProcessParam.getProcessDefinitionId(), ids); } } } } /** * create a recovery waiting thread command when the thread pool is not enough for the process instance. * a sub work process instance does not need to create a recovery command. * create the recovery waiting thread command and delete the origin command at the same time. * if the recovery command already exists, only update the update_time field * * @param originCommand originCommand * @param processInstance processInstance */ public void createRecoveryWaitingThreadCommand(Command originCommand, ProcessInstance processInstance) { // a sub process does not need to create a wait command if (processInstance.getIsSubProcess() == Flag.YES) { if (originCommand != null) { commandMapper.deleteById(originCommand.getId()); } return; } Map<String, String> cmdParam = new HashMap<>(); cmdParam.put(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD, String.valueOf(processInstance.getId())); // the process instance quit in the "waiting thread" state, so no origin command exists if (originCommand == null) { Command command = new Command( CommandType.RECOVER_WAITING_THREAD, processInstance.getTaskDependType(), processInstance.getFailureStrategy(), processInstance.getExecutorId(), processInstance.getProcessDefinition().getCode(), JSONUtils.toJsonString(cmdParam), processInstance.getWarningType(), processInstance.getWarningGroupId(), processInstance.getScheduleTime(), processInstance.getWorkerGroup(), processInstance.getEnvironmentCode(), processInstance.getProcessInstancePriority() ); saveCommand(command); return; } // only refresh the command time if the current command is already a recover-from-waiting command if (originCommand.getCommandType() == CommandType.RECOVER_WAITING_THREAD) { originCommand.setUpdateTime(new Date()); saveCommand(originCommand); } else { // delete the old command and create a new waiting thread command commandMapper.deleteById(originCommand.getId()); originCommand.setId(0); originCommand.setCommandType(CommandType.RECOVER_WAITING_THREAD); originCommand.setUpdateTime(new Date()); originCommand.setCommandParam(JSONUtils.toJsonString(cmdParam)); originCommand.setProcessInstancePriority(processInstance.getProcessInstancePriority()); saveCommand(originCommand); } } /** * get schedule time from command * * @param command command * @param cmdParam cmdParam map * @return date */ private Date getScheduleTime(Command command, Map<String, String> cmdParam) { Date scheduleTime = command.getScheduleTime(); if (scheduleTime == null && cmdParam != null && cmdParam.containsKey(CMDPARAM_COMPLEMENT_DATA_START_DATE)) { scheduleTime = DateUtils.stringToDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE)); } return scheduleTime; } /** * generate a new work process instance from command.
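* the schedule time is taken from the command, falling back to the complement start date when the command carries none (see getScheduleTime).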
* * @param processDefinition processDefinition * @param command command * @param cmdParam cmdParam map * @return process instance */ private ProcessInstance generateNewProcessInstance(ProcessDefinition processDefinition, Command command, Map<String, String> cmdParam) { ProcessInstance processInstance = new ProcessInstance(processDefinition); processInstance.setProcessDefinitionCode(processDefinition.getCode()); processInstance.setProcessDefinitionVersion(processDefinition.getVersion()); processInstance.setState(ExecutionStatus.RUNNING_EXECUTION); processInstance.setRecovery(Flag.NO); processInstance.setStartTime(new Date()); processInstance.setRunTimes(1); processInstance.setMaxTryTimes(0); //processInstance.setProcessDefinitionId(command.getProcessDefinitionId()); processInstance.setCommandParam(command.getCommandParam()); processInstance.setCommandType(command.getCommandType()); processInstance.setIsSubProcess(Flag.NO); processInstance.setTaskDependType(command.getTaskDependType()); processInstance.setFailureStrategy(command.getFailureStrategy()); processInstance.setExecutorId(command.getExecutorId()); WarningType warningType = command.getWarningType() == null ? WarningType.NONE : command.getWarningType(); processInstance.setWarningType(warningType); Integer warningGroupId = command.getWarningGroupId() == null ? 0 : command.getWarningGroupId(); processInstance.setWarningGroupId(warningGroupId); // schedule time Date scheduleTime = getScheduleTime(command, cmdParam); if (scheduleTime != null) { processInstance.setScheduleTime(scheduleTime); } processInstance.setCommandStartTime(command.getStartTime()); processInstance.setLocations(processDefinition.getLocations()); // reset global params while there are start parameters setGlobalParamIfCommanded(processDefinition, cmdParam); // curing global params processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), getCommandTypeIfComplement(processInstance, command), processInstance.getScheduleTime())); // set process instance priority processInstance.setProcessInstancePriority(command.getProcessInstancePriority()); String workerGroup = StringUtils.isBlank(command.getWorkerGroup()) ? Constants.DEFAULT_WORKER_GROUP : command.getWorkerGroup(); processInstance.setWorkerGroup(workerGroup); processInstance.setEnvironmentCode(Objects.isNull(command.getEnvironmentCode()) ? 
-1 : command.getEnvironmentCode()); processInstance.setTimeout(processDefinition.getTimeout()); processInstance.setTenantId(processDefinition.getTenantId()); return processInstance; } private void setGlobalParamIfCommanded(ProcessDefinition processDefinition, Map<String, String> cmdParam) { // get start params from the command param Map<String, String> startParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_START_PARAMS)) { String startParamJson = cmdParam.get(Constants.CMD_PARAM_START_PARAMS); startParamMap = JSONUtils.toMap(startParamJson); } Map<String, String> fatherParamMap = new HashMap<>(); if (cmdParam != null && cmdParam.containsKey(Constants.CMD_PARAM_FATHER_PARAMS)) { String fatherParamJson = cmdParam.get(Constants.CMD_PARAM_FATHER_PARAMS); fatherParamMap = JSONUtils.toMap(fatherParamJson); } startParamMap.putAll(fatherParamMap); // set start params into the global params if (startParamMap.size() > 0 && processDefinition.getGlobalParamMap() != null) { for (Map.Entry<String, String> param : processDefinition.getGlobalParamMap().entrySet()) { String val = startParamMap.get(param.getKey()); if (val != null) { param.setValue(val); } } } } /** * get process tenant * if there is a tenant id in the definition, use the tenant of the definition. * if there is no tenant id in the definition or the tenant does not exist, * use the definition creator's tenant. * * @param tenantId tenantId * @param userId userId * @return tenant */ public Tenant getTenantForProcess(int tenantId, int userId) { Tenant tenant = null; if (tenantId >= 0) { tenant = tenantMapper.queryById(tenantId); } if (userId == 0) { return null; } if (tenant == null) { User user = userMapper.selectById(userId); tenant = tenantMapper.queryById(user.getTenantId()); } return tenant; } /** * get an environment * use the code of the environment to find an environment. * * @param environmentCode environmentCode * @return Environment */ public Environment findEnvironmentByCode(Long environmentCode) { Environment environment = null; if (environmentCode >= 0) { environment = environmentMapper.queryByEnvironmentCode(environmentCode); } return environment; } /** * check whether the command parameters are valid * * @param command command * @param cmdParam cmdParam map * @return whether the command param is valid */ private Boolean checkCmdParam(Command command, Map<String, String> cmdParam) { if (command.getTaskDependType() == TaskDependType.TASK_ONLY || command.getTaskDependType() == TaskDependType.TASK_PRE) { if (cmdParam == null || !cmdParam.containsKey(Constants.CMD_PARAM_START_NODE_NAMES) || cmdParam.get(Constants.CMD_PARAM_START_NODE_NAMES).isEmpty()) { logger.error("command node depend type is {}, but the start nodes are null ", command.getTaskDependType()); return false; } } return true; } /** * construct a process instance according to one command. * * @param command command * @param host host * @return process instance */ private ProcessInstance constructProcessInstance(Command command, String host) { ProcessInstance processInstance; CommandType commandType = command.getCommandType(); Map<String, String> cmdParam = JSONUtils.toMap(command.getCommandParam()); ProcessDefinition processDefinition = getProcessDefinitionByCommand(command.getProcessDefinitionCode(), cmdParam); if (processDefinition == null) { logger.error("cannot find the work process define!
define code : {}", command.getProcessDefinitionCode()); return null; } if (cmdParam != null) { int processInstanceId = 0; // recover from failure or pause tasks if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { String processId = cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING); processInstanceId = Integer.parseInt(processId); if (processInstanceId == 0) { logger.error("command parameter is error, [ ProcessInstanceId ] is 0"); return null; } } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { // sub process map String pId = cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS); processInstanceId = Integer.parseInt(pId); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { // waiting thread command String pId = cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD); processInstanceId = Integer.parseInt(pId); } if (processInstanceId == 0) { processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } else { processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return processInstance; } CommandType commandTypeIfComplement = getCommandTypeIfComplement(processInstance, command); // reset global params while repeat running is needed by cmdParam if (commandTypeIfComplement == CommandType.REPEAT_RUNNING) { setGlobalParamIfCommanded(processDefinition, cmdParam); } // Recalculate global parameters after rerun. processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), commandTypeIfComplement, processInstance.getScheduleTime())); processInstance.setProcessDefinition(processDefinition); } //reset command parameter if (processInstance.getCommandParam() != null) { Map<String, String> processCmdParam = JSONUtils.toMap(processInstance.getCommandParam()); for (Map.Entry<String, String> entry : processCmdParam.entrySet()) { if (!cmdParam.containsKey(entry.getKey())) { cmdParam.put(entry.getKey(), entry.getValue()); } } } // reset command parameter if sub process if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstance.setCommandParam(command.getCommandParam()); } } else { // generate one new process instance processInstance = generateNewProcessInstance(processDefinition, command, cmdParam); } if (Boolean.FALSE.equals(checkCmdParam(command, cmdParam))) { logger.error("command parameter check failed!"); return null; } if (command.getScheduleTime() != null) { processInstance.setScheduleTime(command.getScheduleTime()); } processInstance.setHost(host); ExecutionStatus runStatus = ExecutionStatus.RUNNING_EXECUTION; int runTime = processInstance.getRunTimes(); switch (commandType) { case START_PROCESS: break; case START_FAILURE_TASK_PROCESS: // find failed tasks and init these tasks List<Integer> failedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.FAILURE); List<Integer> toleranceList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.NEED_FAULT_TOLERANCE); List<Integer> killedList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); failedList.addAll(killedList); failedList.addAll(toleranceList); for (Integer taskId : failedList) { initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(Constants.COMMA, convertIntListToString(failedList))); 
processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case START_CURRENT_TASK_PROCESS: break; case RECOVER_WAITING_THREAD: break; case RECOVER_SUSPENDED_PROCESS: // find pause tasks and init task's state cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); List<Integer> suspendedNodeList = this.findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.PAUSE); List<Integer> stopNodeList = findTaskIdByInstanceState(processInstance.getId(), ExecutionStatus.KILL); suspendedNodeList.addAll(stopNodeList); for (Integer taskId : suspendedNodeList) { // initialize the pause state initTaskInstance(this.findTaskInstanceById(taskId)); } cmdParam.put(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING, String.join(",", convertIntListToString(suspendedNodeList))); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); processInstance.setRunTimes(runTime + 1); break; case RECOVER_TOLERANCE_FAULT_PROCESS: // recover tolerance fault process processInstance.setRecovery(Flag.YES); runStatus = processInstance.getState(); break; case COMPLEMENT_DATA: // delete all the valid tasks when complement data List<TaskInstance> taskInstanceList = this.findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : taskInstanceList) { taskInstance.setFlag(Flag.NO); this.updateTaskInstance(taskInstance); } initComplementDataParam(processDefinition, processInstance, cmdParam); break; case REPEAT_RUNNING: // delete the recover task names from command parameter if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) { cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING); processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam)); } // delete all the valid tasks when repeat running List<TaskInstance> validTaskList = findValidTaskListByProcessId(processInstance.getId()); for (TaskInstance taskInstance : validTaskList) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); } processInstance.setStartTime(new Date()); processInstance.setEndTime(null); processInstance.setRunTimes(runTime + 1); initComplementDataParam(processDefinition, processInstance, cmdParam); break; case SCHEDULER: break; default: break; } processInstance.setState(runStatus); return processInstance; } /** * get process definition by command * If it is a fault-tolerant command, get the specified version of ProcessDefinition through ProcessInstance * Otherwise, get the latest version of ProcessDefinition * * @return ProcessDefinition */ private ProcessDefinition getProcessDefinitionByCommand(long processDefinitionCode, Map<String, String> cmdParam) { if (cmdParam != null) { int processInstanceId = 0; if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_SUB_PROCESS)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_SUB_PROCESS)); } else if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)) { processInstanceId = Integer.parseInt(cmdParam.get(Constants.CMD_PARAM_RECOVERY_WAITING_THREAD)); } if (processInstanceId != 0) { ProcessInstance processInstance = this.findProcessInstanceDetailById(processInstanceId); if (processInstance == null) { return null; } return processDefineLogMapper.queryByDefinitionCodeAndVersion( processInstance.getProcessDefinitionCode(), 
processInstance.getProcessDefinitionVersion()); } } return processDefineMapper.queryByCode(processDefinitionCode); } /** * return COMPLEMENT_DATA if the process starts with complement data * * @param processInstance processInstance * @param command command * @return command type */ private CommandType getCommandTypeIfComplement(ProcessInstance processInstance, Command command) { if (CommandType.COMPLEMENT_DATA == processInstance.getCmdTypeIfComplement()) { return CommandType.COMPLEMENT_DATA; } else { return command.getCommandType(); } } /** * initialize complement data parameters * * @param processDefinition processDefinition * @param processInstance processInstance * @param cmdParam cmdParam */ private void initComplementDataParam(ProcessDefinition processDefinition, ProcessInstance processInstance, Map<String, String> cmdParam) { if (!processInstance.isComplementData()) { return; } Date startComplementTime = DateUtils.parse(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE), YYYY_MM_DD_HH_MM_SS); if (Flag.NO == processInstance.getIsSubProcess()) { processInstance.setScheduleTime(startComplementTime); } processInstance.setGlobalParams(ParameterUtils.curingGlobalParams( processDefinition.getGlobalParamMap(), processDefinition.getGlobalParamList(), CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime())); } /** * set sub work process parameters. * handle the sub work process instance, update the relation table and command parameters * set the sub work process flag, extend parent work process command parameters * * @param subProcessInstance subProcessInstance */ public void setSubProcessParam(ProcessInstance subProcessInstance) { String cmdParam = subProcessInstance.getCommandParam(); if (StringUtils.isEmpty(cmdParam)) { return; } Map<String, String> paramMap = JSONUtils.toMap(cmdParam); // write the sub process id into the cmd param. if (paramMap.containsKey(CMD_PARAM_SUB_PROCESS) && CMD_PARAM_EMPTY_SUB_PROCESS.equals(paramMap.get(CMD_PARAM_SUB_PROCESS))) { paramMap.remove(CMD_PARAM_SUB_PROCESS); paramMap.put(CMD_PARAM_SUB_PROCESS, String.valueOf(subProcessInstance.getId())); subProcessInstance.setCommandParam(JSONUtils.toJsonString(paramMap)); subProcessInstance.setIsSubProcess(Flag.YES); this.saveProcessInstance(subProcessInstance); } // copy parent instance user-defined params to the sub process. String parentInstanceId = paramMap.get(CMD_PARAM_SUB_PROCESS_PARENT_INSTANCE_ID); if (StringUtils.isNotEmpty(parentInstanceId)) { ProcessInstance parentInstance = findProcessInstanceDetailById(Integer.parseInt(parentInstanceId)); if (parentInstance != null) { subProcessInstance.setGlobalParams( joinGlobalParams(parentInstance.getGlobalParams(), subProcessInstance.getGlobalParams())); this.saveProcessInstance(subProcessInstance); } else { logger.error("sub process command params error, cannot find parent instance: {} ", cmdParam); } } ProcessInstanceMap processInstanceMap = JSONUtils.parseObject(cmdParam, ProcessInstanceMap.class); if (processInstanceMap == null || processInstanceMap.getParentProcessInstanceId() == 0) { return; } // update the sub process id in the process map table processInstanceMap.setProcessInstanceId(subProcessInstance.getId()); this.updateWorkProcessInstanceMap(processInstanceMap); } /** * join parent global params into the sub process. * only keys that are not already in the sub process global params are joined.
* * @param parentGlobalParams parentGlobalParams * @param subGlobalParams subGlobalParams * @return global params join */ private String joinGlobalParams(String parentGlobalParams, String subGlobalParams) { List<Property> parentPropertyList = JSONUtils.toList(parentGlobalParams, Property.class); List<Property> subPropertyList = JSONUtils.toList(subGlobalParams, Property.class); Map<String, String> subMap = subPropertyList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); for (Property parent : parentPropertyList) { if (!subMap.containsKey(parent.getProp())) { subPropertyList.add(parent); } } return JSONUtils.toJsonString(subPropertyList); } /** * initialize task instance * * @param taskInstance taskInstance */ private void initTaskInstance(TaskInstance taskInstance) { if (!taskInstance.isSubProcess() && (taskInstance.getState().typeIsCancel() || taskInstance.getState().typeIsFailure())) { taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); return; } taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS); updateTaskInstance(taskInstance); } /** * retry submitting a task to the db * * @param taskInstance task instance * @param commitRetryTimes commit retry times * @param commitInterval commit interval * @return task instance */ public TaskInstance submitTask(TaskInstance taskInstance, int commitRetryTimes, int commitInterval) { int retryTimes = 1; boolean submitDB = false; TaskInstance task = null; while (retryTimes <= commitRetryTimes) { try { if (!submitDB) { // submit task to db task = submitTask(taskInstance); if (task != null && task.getId() != 0) { submitDB = true; break; } } if (!submitDB) { logger.error("task commit to db failed, taskId {} has already retried {} times, please check the database", taskInstance.getId(), retryTimes); } Thread.sleep(commitInterval); } catch (Exception e) { logger.error("task commit to mysql failed", e); } retryTimes += 1; } return task; } /** * submit task to db * submit sub process to command * * @param taskInstance taskInstance * @return task instance */ @Transactional(rollbackFor = Exception.class) public TaskInstance submitTask(TaskInstance taskInstance) { ProcessInstance processInstance = this.findProcessInstanceDetailById(taskInstance.getProcessInstanceId()); logger.info("start submit task : {}, instance id:{}, state: {}", taskInstance.getName(), taskInstance.getProcessInstanceId(), processInstance.getState()); // submit to db TaskInstance task = submitTaskInstanceToDB(taskInstance, processInstance); if (task == null) { logger.error("end submit task to db error, task name:{}, process id:{} state: {} ", taskInstance.getName(), taskInstance.getProcessInstance(), processInstance.getState()); return task; } if (!task.getState().typeIsFinished()) { createSubWorkProcess(processInstance, task); } logger.info("end submit task to db successfully:{} {} state:{} complete, instance id:{} state: {} ", taskInstance.getId(), taskInstance.getName(), task.getState(), processInstance.getId(), processInstance.getState()); return task; } /** * set work process instance map * repeat running does not generate a new sub process instance * set map {parent instance id, task instance id, 0(child instance id)} * * @param parentInstance parentInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap setProcessInstanceMap(ProcessInstance parentInstance, TaskInstance parentTask) { ProcessInstanceMap processMap = findWorkProcessMapByParent(parentInstance.getId(), parentTask.getId()); if (processMap != null) { return processMap; } if
(parentInstance.getCommandType() == CommandType.REPEAT_RUNNING) { // update the current task id in the map processMap = findPreviousTaskProcessMap(parentInstance, parentTask); if (processMap != null) { processMap.setParentTaskInstanceId(parentTask.getId()); updateWorkProcessInstanceMap(processMap); return processMap; } } // new task processMap = new ProcessInstanceMap(); processMap.setParentProcessInstanceId(parentInstance.getId()); processMap.setParentTaskInstanceId(parentTask.getId()); createWorkProcessInstanceMap(processMap); return processMap; } /** * find the previous task's work process map. * * @param parentProcessInstance parentProcessInstance * @param parentTask parentTask * @return process instance map */ private ProcessInstanceMap findPreviousTaskProcessMap(ProcessInstance parentProcessInstance, TaskInstance parentTask) { Integer preTaskId = 0; List<TaskInstance> preTaskList = this.findPreviousTaskListByWorkProcessId(parentProcessInstance.getId()); for (TaskInstance task : preTaskList) { if (task.getName().equals(parentTask.getName())) { preTaskId = task.getId(); ProcessInstanceMap map = findWorkProcessMapByParent(parentProcessInstance.getId(), preTaskId); if (map != null) { return map; } } } logger.info("sub process instance is not found, parent task:{}, parent instance:{}", parentTask.getId(), parentProcessInstance.getId()); return null; } /** * create sub work process command * * @param parentProcessInstance parentProcessInstance * @param task task */ public void createSubWorkProcess(ProcessInstance parentProcessInstance, TaskInstance task) { if (!task.isSubProcess()) { return; } // check whether the sub work flow map already exists first ProcessInstanceMap instanceMap = findWorkProcessMapByParent(parentProcessInstance.getId(), task.getId()); if (null != instanceMap && CommandType.RECOVER_TOLERANCE_FAULT_PROCESS == parentProcessInstance.getCommandType()) { // fault-tolerance recovery does not create a new command when the sub command has already been created return; } instanceMap = setProcessInstanceMap(parentProcessInstance, task); ProcessInstance childInstance = null; if (instanceMap.getProcessInstanceId() != 0) { childInstance = findProcessInstanceById(instanceMap.getProcessInstanceId()); } Command subProcessCommand = createSubProcessCommand(parentProcessInstance, childInstance, instanceMap, task); updateSubProcessDefinitionByParent(parentProcessInstance, subProcessCommand.getProcessDefinitionCode()); initSubInstanceState(childInstance); createCommand(subProcessCommand); logger.info("sub process command created: {} ", subProcessCommand); } /** * complement data needs to transform parent parameters to the child.
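* the complement date window (start/end) is copied into the sub-process command so the child instance runs over the same range.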
*/ private String getSubWorkFlowParam(ProcessInstanceMap instanceMap, ProcessInstance parentProcessInstance, Map<String, String> fatherParams) { // set sub work process command String processMapStr = JSONUtils.toJsonString(instanceMap); Map<String, String> cmdParam = JSONUtils.toMap(processMapStr); if (parentProcessInstance.isComplementData()) { Map<String, String> parentParam = JSONUtils.toMap(parentProcessInstance.getCommandParam()); String endTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE); String startTime = parentParam.get(CMDPARAM_COMPLEMENT_DATA_START_DATE); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_END_DATE, endTime); cmdParam.put(CMDPARAM_COMPLEMENT_DATA_START_DATE, startTime); processMapStr = JSONUtils.toJsonString(cmdParam); } if (fatherParams.size() != 0) { cmdParam.put(CMD_PARAM_FATHER_PARAMS, JSONUtils.toJsonString(fatherParams)); processMapStr = JSONUtils.toJsonString(cmdParam); } return processMapStr; } public Map<String, String> getGlobalParamMap(String globalParams) { List<Property> propList; Map<String, String> globalParamMap = new HashMap<>(); if (StringUtils.isNotEmpty(globalParams)) { propList = JSONUtils.toList(globalParams, Property.class); globalParamMap = propList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue)); } return globalParamMap; } /** * create sub work process command */ public Command createSubProcessCommand(ProcessInstance parentProcessInstance, ProcessInstance childInstance, ProcessInstanceMap instanceMap, TaskInstance task) { CommandType commandType = getSubCommandType(parentProcessInstance, childInstance); Map<String, String> subProcessParam = JSONUtils.toMap(task.getTaskParams()); int childDefineId = Integer.parseInt(subProcessParam.get(Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID)); ProcessDefinition processDefinition = processDefineMapper.queryByDefineId(childDefineId); Object localParams = subProcessParam.get(Constants.LOCAL_PARAMS); List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> globalMap = this.getGlobalParamMap(parentProcessInstance.getGlobalParams()); Map<String, String> fatherParams = new HashMap<>(); if (CollectionUtils.isNotEmpty(allParam)) { for (Property info : allParam) { fatherParams.put(info.getProp(), globalMap.get(info.getProp())); } } String processParam = getSubWorkFlowParam(instanceMap, parentProcessInstance, fatherParams); return new Command( commandType, TaskDependType.TASK_POST, parentProcessInstance.getFailureStrategy(), parentProcessInstance.getExecutorId(), processDefinition.getCode(), processParam, parentProcessInstance.getWarningType(), parentProcessInstance.getWarningGroupId(), parentProcessInstance.getScheduleTime(), task.getWorkerGroup(), task.getEnvironmentCode(), parentProcessInstance.getProcessInstancePriority() ); } /** * initialize sub work flow state * child instance state would be initialized when 'recovery from pause/stop/failure' */ private void initSubInstanceState(ProcessInstance childInstance) { if (childInstance != null) { childInstance.setState(ExecutionStatus.RUNNING_EXECUTION); updateProcessInstance(childInstance); } } /** * get sub work flow command type * child instance exist: child command = fatherCommand * child instance not exists: child command = fatherCommand[0] */ private CommandType getSubCommandType(ProcessInstance parentProcessInstance, ProcessInstance childInstance) { CommandType commandType = parentProcessInstance.getCommandType(); if (childInstance == null) { String fatherHistoryCommand = 
parentProcessInstance.getHistoryCmd(); commandType = CommandType.valueOf(fatherHistoryCommand.split(Constants.COMMA)[0]); } return commandType; } /** * update sub process definition * * @param parentProcessInstance parentProcessInstance * @param childDefinitionCode childDefinitionCode */ private void updateSubProcessDefinitionByParent(ProcessInstance parentProcessInstance, long childDefinitionCode) { ProcessDefinition fatherDefinition = this.findProcessDefinition(parentProcessInstance.getProcessDefinitionCode(), parentProcessInstance.getProcessDefinitionVersion()); ProcessDefinition childDefinition = this.findProcessDefinitionByCode(childDefinitionCode); if (childDefinition != null && fatherDefinition != null) { childDefinition.setWarningGroupId(fatherDefinition.getWarningGroupId()); processDefineMapper.updateById(childDefinition); } } /** * submit task instance to the database * * @param taskInstance taskInstance * @param processInstance processInstance * @return task instance */ public TaskInstance submitTaskInstanceToDB(TaskInstance taskInstance, ProcessInstance processInstance) { ExecutionStatus processInstanceState = processInstance.getState(); if (taskInstance.getState().typeIsFailure()) { if (taskInstance.isSubProcess()) { taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1); } else { if (processInstanceState != ExecutionStatus.READY_STOP && processInstanceState != ExecutionStatus.READY_PAUSE) { // failed task: set the old instance invalid taskInstance.setFlag(Flag.NO); updateTaskInstance(taskInstance); // create new task instance if (taskInstance.getState() != ExecutionStatus.NEED_FAULT_TOLERANCE) { taskInstance.setRetryTimes(taskInstance.getRetryTimes() + 1); } taskInstance.setSubmitTime(null); taskInstance.setStartTime(null); taskInstance.setEndTime(null); taskInstance.setFlag(Flag.YES); taskInstance.setHost(null); taskInstance.setId(0); } } } taskInstance.setExecutorId(processInstance.getExecutorId()); taskInstance.setProcessInstancePriority(processInstance.getProcessInstancePriority()); taskInstance.setState(getSubmitTaskState(taskInstance, processInstanceState)); if (taskInstance.getSubmitTime() == null) { taskInstance.setSubmitTime(new Date()); } if (taskInstance.getFirstSubmitTime() == null) { taskInstance.setFirstSubmitTime(taskInstance.getSubmitTime()); } boolean saveResult = saveTaskInstance(taskInstance); if (!saveResult) { return null; } return taskInstance; } /** * get submit task instance state by the work process state * cannot modify the task state when running/kill/submit success, or this * task instance already exists in the task queue.
* return pause if work process state is ready pause * return stop if work process state is ready stop * if all of above are not satisfied, return submit success * * @param taskInstance taskInstance * @param processInstanceState processInstanceState * @return process instance state */ public ExecutionStatus getSubmitTaskState(TaskInstance taskInstance, ExecutionStatus processInstanceState) { ExecutionStatus state = taskInstance.getState(); // running, delayed or killed // the task already exists in task queue // return state if ( state == ExecutionStatus.RUNNING_EXECUTION || state == ExecutionStatus.DELAY_EXECUTION || state == ExecutionStatus.KILL ) { return state; } // return pause/stop if process instance state is ready pause/stop // or return submit success if (processInstanceState == ExecutionStatus.READY_PAUSE) { state = ExecutionStatus.PAUSE; } else if (processInstanceState == ExecutionStatus.READY_STOP || !checkProcessStrategy(taskInstance)) { state = ExecutionStatus.KILL; } else { state = ExecutionStatus.SUBMITTED_SUCCESS; } return state; } /** * check process instance strategy * * @param taskInstance taskInstance * @return check strategy result */ private boolean checkProcessStrategy(TaskInstance taskInstance) { ProcessInstance processInstance = this.findProcessInstanceById(taskInstance.getProcessInstanceId()); FailureStrategy failureStrategy = processInstance.getFailureStrategy(); if (failureStrategy == FailureStrategy.CONTINUE) { return true; } List<TaskInstance> taskInstances = this.findValidTaskListByProcessId(taskInstance.getProcessInstanceId()); for (TaskInstance task : taskInstances) { if (task.getState() == ExecutionStatus.FAILURE && task.getRetryTimes() >= task.getMaxRetryTimes()) { return false; } } return true; } /** * insert or update work process instance to database * * @param processInstance processInstance */ public void saveProcessInstance(ProcessInstance processInstance) { if (processInstance == null) { logger.error("save error, process instance is null!"); return; } if (processInstance.getId() != 0) { processInstanceMapper.updateById(processInstance); } else { processInstanceMapper.insert(processInstance); } } /** * insert or update command * * @param command command * @return save command result */ public int saveCommand(Command command) { if (command.getId() != 0) { return commandMapper.updateById(command); } else { return commandMapper.insert(command); } } /** * insert or update task instance * * @param taskInstance taskInstance * @return save task instance result */ public boolean saveTaskInstance(TaskInstance taskInstance) { if (taskInstance.getId() != 0) { return updateTaskInstance(taskInstance); } else { return createTaskInstance(taskInstance); } } /** * insert task instance * * @param taskInstance taskInstance * @return create task instance result */ public boolean createTaskInstance(TaskInstance taskInstance) { int count = taskInstanceMapper.insert(taskInstance); return count > 0; } /** * update task instance * * @param taskInstance taskInstance * @return update task instance result */ public boolean updateTaskInstance(TaskInstance taskInstance) { int count = taskInstanceMapper.updateById(taskInstance); return count > 0; } /** * find task instance by id * * @param taskId task id * @return task instance */ public TaskInstance findTaskInstanceById(Integer taskId) { return taskInstanceMapper.selectById(taskId); } /** * package task instance, associate processInstance and processDefine * * @param taskInstId taskInstId * @return task instance */ public
TaskInstance getTaskInstanceDetailByTaskId(int taskInstId) { // get task instance TaskInstance taskInstance = findTaskInstanceById(taskInstId); if (taskInstance == null) { return null; } // get process instance ProcessInstance processInstance = findProcessInstanceDetailById(taskInstance.getProcessInstanceId()); // get process define ProcessDefinition processDefine = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); taskInstance.setProcessInstance(processInstance); taskInstance.setProcessDefine(processDefine); TaskDefinition taskDefinition = taskDefinitionLogMapper.queryByDefinitionCodeAndVersion( taskInstance.getTaskCode(), taskInstance.getTaskDefinitionVersion()); updateTaskDefinitionResources(taskDefinition); taskInstance.setTaskDefine(taskDefinition); return taskInstance; } /** * Update {@link ResourceInfo} information in {@link TaskDefinition} * * @param taskDefinition the given {@link TaskDefinition} */ private void updateTaskDefinitionResources(TaskDefinition taskDefinition) { Map<String, Object> taskParameters = JSONUtils.parseObject( taskDefinition.getTaskParams(), new TypeReference<Map<String, Object>>() { }); if (taskParameters != null) { // if contains mainJar field, query resource from database // Flink, Spark, MR if (taskParameters.containsKey("mainJar")) { Object mainJarObj = taskParameters.get("mainJar"); ResourceInfo mainJar = JSONUtils.parseObject( JSONUtils.toJsonString(mainJarObj), ResourceInfo.class); ResourceInfo resourceInfo = updateResourceInfo(mainJar); if (resourceInfo != null) { taskParameters.put("mainJar", resourceInfo); } } // update resourceList information if (taskParameters.containsKey("resourceList")) { String resourceListStr = JSONUtils.toJsonString(taskParameters.get("resourceList")); List<ResourceInfo> resourceInfos = JSONUtils.toList(resourceListStr, ResourceInfo.class); List<ResourceInfo> updatedResourceInfos = resourceInfos .stream() .map(this::updateResourceInfo) .filter(Objects::nonNull) .collect(Collectors.toList()); taskParameters.put("resourceList", updatedResourceInfos); } // set task parameters taskDefinition.setTaskParams(JSONUtils.toJsonString(taskParameters)); } } /** * update {@link ResourceInfo} by given original ResourceInfo * * @param res origin resource info * @return {@link ResourceInfo} */ private ResourceInfo updateResourceInfo(ResourceInfo res) { ResourceInfo resourceInfo = null; // only if mainJar is not null and does not contains "resourceName" field if (res != null) { int resourceId = res.getId(); if (resourceId <= 0) { logger.error("invalid resourceId, {}", resourceId); return null; } resourceInfo = new ResourceInfo(); // get resource from database, only one resource should be returned Resource resource = getResourceById(resourceId); resourceInfo.setId(resourceId); resourceInfo.setRes(resource.getFileName()); resourceInfo.setResourceName(resource.getFullName()); if (logger.isInfoEnabled()) { logger.info("updated resource info {}", JSONUtils.toJsonString(resourceInfo)); } } return resourceInfo; } /** * get id list by task state * * @param instanceId instanceId * @param state state * @return task instance states */ public List<Integer> findTaskIdByInstanceState(int instanceId, ExecutionStatus state) { return taskInstanceMapper.queryTaskByProcessIdAndState(instanceId, state.ordinal()); } /** * find valid task list by process definition id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> 
findValidTaskListByProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.YES); } /** * find previous task list by work process id * * @param processInstanceId processInstanceId * @return task instance list */ public List<TaskInstance> findPreviousTaskListByWorkProcessId(Integer processInstanceId) { return taskInstanceMapper.findValidTaskListByProcessId(processInstanceId, Flag.NO); } /** * update work process instance map * * @param processInstanceMap processInstanceMap * @return update process instance result */ public int updateWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { return processInstanceMapMapper.updateById(processInstanceMap); } /** * create work process instance map * * @param processInstanceMap processInstanceMap * @return create process instance result */ public int createWorkProcessInstanceMap(ProcessInstanceMap processInstanceMap) { int count = 0; if (processInstanceMap != null) { return processInstanceMapMapper.insert(processInstanceMap); } return count; } /** * find work process map by parent process id and parent task id. * * @param parentWorkProcessId parentWorkProcessId * @param parentTaskId parentTaskId * @return process instance map */ public ProcessInstanceMap findWorkProcessMapByParent(Integer parentWorkProcessId, Integer parentTaskId) { return processInstanceMapMapper.queryByParentId(parentWorkProcessId, parentTaskId); } /** * delete work process map by parent process id * * @param parentWorkProcessId parentWorkProcessId * @return delete process map result */ public int deleteWorkProcessMapByParentId(int parentWorkProcessId) { return processInstanceMapMapper.deleteByParentProcessId(parentWorkProcessId); } /** * find sub process instance * * @param parentProcessId parentProcessId * @param parentTaskId parentTaskId * @return process instance */ public ProcessInstance findSubProcessInstance(Integer parentProcessId, Integer parentTaskId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryByParentId(parentProcessId, parentTaskId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getProcessInstanceId()); return processInstance; } /** * find parent process instance * * @param subProcessId subProcessId * @return process instance */ public ProcessInstance findParentProcessInstance(Integer subProcessId) { ProcessInstance processInstance = null; ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(subProcessId); if (processInstanceMap == null || processInstanceMap.getProcessInstanceId() == 0) { return processInstance; } processInstance = findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); return processInstance; } /** * change task state * * @param state state * @param startTime startTime * @param host host * @param executePath executePath * @param logPath logPath * @param taskInstId taskInstId */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date startTime, String host, String executePath, String logPath, int taskInstId) { taskInstance.setState(state); taskInstance.setStartTime(startTime); taskInstance.setHost(host); taskInstance.setExecutePath(executePath); taskInstance.setLogPath(logPath); saveTaskInstance(taskInstance); } /** * update process instance * * @param processInstance processInstance * @return update process instance 
result */ public int updateProcessInstance(ProcessInstance processInstance) { return processInstanceMapper.updateById(processInstance); } /** * change task state * * @param state state * @param endTime endTime * @param taskInstId taskInstId * @param varPool varPool */ public void changeTaskState(TaskInstance taskInstance, ExecutionStatus state, Date endTime, int processId, String appIds, int taskInstId, String varPool) { taskInstance.setPid(processId); taskInstance.setAppLink(appIds); taskInstance.setState(state); taskInstance.setEndTime(endTime); taskInstance.setVarPool(varPool); changeOutParam(taskInstance); saveTaskInstance(taskInstance); } /** * for show in page of taskInstance * * @param taskInstance */ public void changeOutParam(TaskInstance taskInstance) { if (StringUtils.isEmpty(taskInstance.getVarPool())) { return; } List<Property> properties = JSONUtils.toList(taskInstance.getVarPool(), Property.class); if (CollectionUtils.isEmpty(properties)) { return; } //if the result more than one line,just get the first . Map<String, Object> taskParams = JSONUtils.parseObject(taskInstance.getTaskParams(), new TypeReference<Map<String, Object>>() {}); Object localParams = taskParams.get(LOCAL_PARAMS); if (localParams == null) { return; } List<Property> allParam = JSONUtils.toList(JSONUtils.toJsonString(localParams), Property.class); Map<String, String> outProperty = new HashMap<>(); for (Property info : properties) { if (info.getDirect() == Direct.OUT) { outProperty.put(info.getProp(), info.getValue()); } } for (Property info : allParam) { if (info.getDirect() == Direct.OUT) { String paramName = info.getProp(); info.setValue(outProperty.get(paramName)); } } taskParams.put(LOCAL_PARAMS, allParam); taskInstance.setTaskParams(JSONUtils.toJsonString(taskParams)); } /** * convert integer list to string list * * @param intList intList * @return string list */ public List<String> convertIntListToString(List<Integer> intList) { if (intList == null) { return new ArrayList<>(); } List<String> result = new ArrayList<>(intList.size()); for (Integer intVar : intList) { result.add(String.valueOf(intVar)); } return result; } /** * query schedule by id * * @param id id * @return schedule */ public Schedule querySchedule(int id) { return scheduleMapper.selectById(id); } /** * query Schedule by processDefinitionCode * * @param processDefinitionCode processDefinitionCode * @see Schedule */ public List<Schedule> queryReleaseSchedulerListByProcessDefinitionCode(long processDefinitionCode) { return scheduleMapper.queryReleaseSchedulerListByProcessDefinitionCode(processDefinitionCode); } /** * query need failover process instance * * @param host host * @return process instance list */ public List<ProcessInstance> queryNeedFailoverProcessInstances(String host) { return processInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * process need failover process instance * * @param processInstance processInstance */ @Transactional(rollbackFor = RuntimeException.class) public void processNeedFailoverProcessInstances(ProcessInstance processInstance) { //1 update processInstance host is null processInstance.setHost(Constants.NULL); processInstanceMapper.updateById(processInstance); ProcessDefinition processDefinition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); //2 insert into recover command Command cmd = new Command(); cmd.setProcessDefinitionCode(processDefinition.getCode()); cmd.setCommandParam(String.format("{\"%s\":%d}", 
Constants.CMD_PARAM_RECOVER_PROCESS_ID_STRING, processInstance.getId())); cmd.setExecutorId(processInstance.getExecutorId()); cmd.setCommandType(CommandType.RECOVER_TOLERANCE_FAULT_PROCESS); createCommand(cmd); } /** * query all need failover task instances by host * * @param host host * @return task instance list */ public List<TaskInstance> queryNeedFailoverTaskInstances(String host) { return taskInstanceMapper.queryByHostAndStatus(host, stateArray); } /** * find data source by id * * @param id id * @return datasource */ public DataSource findDataSourceById(int id) { return dataSourceMapper.selectById(id); } /** * update process instance state by id * * @param processInstanceId processInstanceId * @param executionStatus executionStatus * @return update process result */ public int updateProcessInstanceState(Integer processInstanceId, ExecutionStatus executionStatus) { ProcessInstance instance = processInstanceMapper.selectById(processInstanceId); instance.setState(executionStatus); return processInstanceMapper.updateById(instance); } /** * find process instance by the task id * * @param taskId taskId * @return process instance */ public ProcessInstance findProcessInstanceByTaskId(int taskId) { TaskInstance taskInstance = taskInstanceMapper.selectById(taskId); if (taskInstance != null) { return processInstanceMapper.selectById(taskInstance.getProcessInstanceId()); } return null; } /** * find udf function list by id list string * * @param ids ids * @return udf function list */ public List<UdfFunc> queryUdfFunListByIds(int[] ids) { return udfFuncMapper.queryUdfByIdStr(ids, null); } /** * find tenant code by resource name * * @param resName resource name * @param resourceType resource type * @return tenant code */ public String queryTenantCodeByResName(String resName, ResourceType resourceType) { // in order to query tenant code successful although the version is older String fullName = resName.startsWith("/") ? resName : String.format("/%s", resName); List<Resource> resourceList = resourceMapper.queryResource(fullName, resourceType.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { return StringUtils.EMPTY; } int userId = resourceList.get(0).getUserId(); User user = userMapper.selectById(userId); if (Objects.isNull(user)) { return StringUtils.EMPTY; } Tenant tenant = tenantMapper.selectById(user.getTenantId()); if (Objects.isNull(tenant)) { return StringUtils.EMPTY; } return tenant.getTenantCode(); } /** * find schedule list by process define codes. 
* * @param codes codes * @return schedule list */ public List<Schedule> selectAllByProcessDefineCode(long[] codes) { return scheduleMapper.selectAllByProcessDefineArray(codes); } /** * find last scheduler process instance in the date interval * * @param definitionCode definitionCode * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastSchedulerProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastSchedulerProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last manual process instance interval * * @param definitionCode process definition code * @param dateInterval dateInterval * @return process instance */ public ProcessInstance findLastManualProcessInterval(Long definitionCode, DateInterval dateInterval) { return processInstanceMapper.queryLastManualProcess(definitionCode, dateInterval.getStartTime(), dateInterval.getEndTime()); } /** * find last running process instance * * @param definitionCode process definition code * @param startTime start time * @param endTime end time * @return process instance */ public ProcessInstance findLastRunningProcess(Long definitionCode, Date startTime, Date endTime) { return processInstanceMapper.queryLastRunningProcess(definitionCode, startTime, endTime, stateArray); } /** * query user queue by process instance id * * @param processInstanceId processInstanceId * @return queue */ public String queryUserQueueByProcessInstanceId(int processInstanceId) { String queue = ""; ProcessInstance processInstance = processInstanceMapper.selectById(processInstanceId); if (processInstance == null) { return queue; } User executor = userMapper.selectById(processInstance.getExecutorId()); if (executor != null) { queue = executor.getQueue(); } return queue; } /** * query project name and user name by processInstanceId. 
* * @param processInstanceId processInstanceId * @return projectName and userName */ public ProjectUser queryProjectWithUserByProcessInstanceId(int processInstanceId) { return projectMapper.queryProjectWithUserByProcessInstanceId(processInstanceId); } /** * get task worker group * * @param taskInstance taskInstance * @return workerGroupId */ public String getTaskWorkerGroup(TaskInstance taskInstance) { String workerGroup = taskInstance.getWorkerGroup(); if (StringUtils.isNotBlank(workerGroup)) { return workerGroup; } int processInstanceId = taskInstance.getProcessInstanceId(); ProcessInstance processInstance = findProcessInstanceById(processInstanceId); if (processInstance != null) { return processInstance.getWorkerGroup(); } logger.info("task : {} will use default worker group", taskInstance.getId()); return Constants.DEFAULT_WORKER_GROUP; } /** * get have perm project list * * @param userId userId * @return project list */ public List<Project> getProjectListHavePerm(int userId) { List<Project> createProjects = projectMapper.queryProjectCreatedByUser(userId); List<Project> authedProjects = projectMapper.queryAuthedProjectListByUserId(userId); if (createProjects == null) { createProjects = new ArrayList<>(); } if (authedProjects != null) { createProjects.addAll(authedProjects); } return createProjects; } /** * list unauthorized udf function * * @param userId user id * @param needChecks data source id array * @return unauthorized udf function list */ public <T> List<T> listUnauthorized(int userId, T[] needChecks, AuthorizationType authorizationType) { List<T> resultList = new ArrayList<>(); if (Objects.nonNull(needChecks) && needChecks.length > 0) { Set<T> originResSet = new HashSet<>(Arrays.asList(needChecks)); switch (authorizationType) { case RESOURCE_FILE_ID: case UDF_FILE: List<Resource> ownUdfResources = resourceMapper.listAuthorizedResourceById(userId, needChecks); addAuthorizedResources(ownUdfResources, userId); Set<Integer> authorizedResourceFiles = ownUdfResources.stream().map(Resource::getId).collect(toSet()); originResSet.removeAll(authorizedResourceFiles); break; case RESOURCE_FILE_NAME: List<Resource> ownResources = resourceMapper.listAuthorizedResource(userId, needChecks); addAuthorizedResources(ownResources, userId); Set<String> authorizedResources = ownResources.stream().map(Resource::getFullName).collect(toSet()); originResSet.removeAll(authorizedResources); break; case DATASOURCE: Set<Integer> authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId, needChecks).stream().map(DataSource::getId).collect(toSet()); originResSet.removeAll(authorizedDatasources); break; case UDF: Set<Integer> authorizedUdfs = udfFuncMapper.listAuthorizedUdfFunc(userId, needChecks).stream().map(UdfFunc::getId).collect(toSet()); originResSet.removeAll(authorizedUdfs); break; default: break; } resultList.addAll(originResSet); } return resultList; } /** * get user by user id * * @param userId user id * @return User */ public User getUserById(int userId) { return userMapper.selectById(userId); } /** * get resource by resource id * * @param resourceId resource id * @return Resource */ public Resource getResourceById(int resourceId) { return resourceMapper.selectById(resourceId); } /** * list resources by ids * * @param resIds resIds * @return resource list */ public List<Resource> listResourceByIds(Integer[] resIds) { return resourceMapper.listResourceByIds(resIds); } /** * format task app id in task instance */ public String formatTaskAppId(TaskInstance taskInstance) { 
ProcessInstance processInstance = findProcessInstanceById(taskInstance.getProcessInstanceId()); if (processInstance == null) { return ""; } ProcessDefinition definition = findProcessDefinition(processInstance.getProcessDefinitionCode(), processInstance.getProcessDefinitionVersion()); if (definition == null) { return ""; } return String.format("%s_%s_%s", definition.getId(), processInstance.getId(), taskInstance.getId()); } /** * switch process definition version to process definition log version */ public int switchVersion(ProcessDefinition processDefinition, ProcessDefinitionLog processDefinitionLog) { if (null == processDefinition || null == processDefinitionLog) { return Constants.DEFINITION_FAILURE; } processDefinitionLog.setId(processDefinition.getId()); processDefinitionLog.setReleaseState(ReleaseState.OFFLINE); processDefinitionLog.setFlag(Flag.YES); int result = processDefineMapper.updateById(processDefinitionLog); if (result > 0) { result = switchProcessTaskRelationVersion(processDefinition); if (result <= 0) { return Constants.DEFINITION_FAILURE; } } return result; } public int switchProcessTaskRelationVersion(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); if (!processTaskRelationList.isEmpty()) { processTaskRelationMapper.deleteByCode(processDefinition.getProjectCode(), processDefinition.getCode()); } List<ProcessTaskRelationLog> processTaskRelationLogList = processTaskRelationLogMapper.queryByProcessCodeAndVersion(processDefinition.getCode(), processDefinition.getVersion()); return processTaskRelationMapper.batchInsert(processTaskRelationLogList); } /** * get resource ids * * @param taskDefinition taskDefinition * @return resource ids */ public String getResourceIds(TaskDefinition taskDefinition) { Set<Integer> resourceIds = null; AbstractParameters params = TaskParametersUtils.getParameters(taskDefinition.getTaskType(), taskDefinition.getTaskParams()); if (params != null && CollectionUtils.isNotEmpty(params.getResourceFilesList())) { resourceIds = params.getResourceFilesList(). 
stream() .filter(t -> t.getId() != 0) .map(ResourceInfo::getId) .collect(Collectors.toSet()); } if (CollectionUtils.isEmpty(resourceIds)) { return StringUtils.EMPTY; } return StringUtils.join(resourceIds, ","); } public int saveTaskDefine(User operator, long projectCode, List<TaskDefinitionLog> taskDefinitionLogs) { Date now = new Date(); List<TaskDefinitionLog> newTaskDefinitionLogs = new ArrayList<>(); List<TaskDefinitionLog> updateTaskDefinitionLogs = new ArrayList<>(); for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperateTime(now); taskDefinitionLog.setOperator(operator.getId()); taskDefinitionLog.setResourceIds(getResourceIds(taskDefinitionLog)); if (taskDefinitionLog.getCode() > 0 && taskDefinitionLog.getVersion() > 0) { TaskDefinitionLog definitionCodeAndVersion = taskDefinitionLogMapper .queryByDefinitionCodeAndVersion(taskDefinitionLog.getCode(), taskDefinitionLog.getVersion()); if (definitionCodeAndVersion != null) { if (!taskDefinitionLog.equals(definitionCodeAndVersion)) { taskDefinitionLog.setUserId(definitionCodeAndVersion.getUserId()); Integer version = taskDefinitionLogMapper.queryMaxVersionForDefinition(taskDefinitionLog.getCode()); taskDefinitionLog.setVersion(version + 1); taskDefinitionLog.setCreateTime(definitionCodeAndVersion.getCreateTime()); updateTaskDefinitionLogs.add(taskDefinitionLog); } continue; } } taskDefinitionLog.setUserId(operator.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); if (taskDefinitionLog.getCode() == 0) { try { taskDefinitionLog.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); return Constants.DEFINITION_FAILURE; } } newTaskDefinitionLogs.add(taskDefinitionLog); } int insertResult = 0; int updateResult = 0; for (TaskDefinitionLog taskDefinitionToUpdate : updateTaskDefinitionLogs) { TaskDefinition task = taskDefinitionMapper.queryByCode(taskDefinitionToUpdate.getCode()); if (task == null) { newTaskDefinitionLogs.add(taskDefinitionToUpdate); } else { insertResult += taskDefinitionLogMapper.insert(taskDefinitionToUpdate); taskDefinitionToUpdate.setId(task.getId()); updateResult += taskDefinitionMapper.updateById(taskDefinitionToUpdate); } } if (!newTaskDefinitionLogs.isEmpty()) { updateResult += taskDefinitionMapper.batchInsert(newTaskDefinitionLogs); insertResult += taskDefinitionLogMapper.batchInsert(newTaskDefinitionLogs); } return (insertResult & updateResult) > 0 ? 1 : Constants.EXIT_CODE_SUCCESS; } /** * save processDefinition (including create or update processDefinition) */ public int saveProcessDefine(User operator, ProcessDefinition processDefinition, Boolean isFromProcessDefine) { ProcessDefinitionLog processDefinitionLog = new ProcessDefinitionLog(processDefinition); Integer version = processDefineLogMapper.queryMaxVersionForDefinition(processDefinition.getCode()); int insertVersion = version == null || version == 0 ? Constants.VERSION_FIRST : version + 1; processDefinitionLog.setVersion(insertVersion); processDefinitionLog.setReleaseState(isFromProcessDefine ? 
ReleaseState.OFFLINE : ReleaseState.ONLINE); processDefinitionLog.setOperator(operator.getId()); processDefinitionLog.setOperateTime(processDefinition.getUpdateTime()); int insertLog = processDefineLogMapper.insert(processDefinitionLog); int result; if (0 == processDefinition.getId()) { result = processDefineMapper.insert(processDefinitionLog); } else { processDefinitionLog.setId(processDefinition.getId()); result = processDefineMapper.updateById(processDefinitionLog); } return (insertLog & result) > 0 ? insertVersion : 0; } /** * save task relations */ public int saveTaskRelation(User operator, long projectCode, long processDefinitionCode, int processDefinitionVersion, List<ProcessTaskRelationLog> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, TaskDefinitionLog> taskDefinitionLogMap = null; if (CollectionUtils.isNotEmpty(taskDefinitionLogs)) { taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinition::getCode, taskDefinitionLog -> taskDefinitionLog)); } Date now = new Date(); for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { processTaskRelationLog.setProjectCode(projectCode); processTaskRelationLog.setProcessDefinitionCode(processDefinitionCode); processTaskRelationLog.setProcessDefinitionVersion(processDefinitionVersion); if (taskDefinitionLogMap != null) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(processTaskRelationLog.getPreTaskCode()); if (taskDefinitionLog != null) { processTaskRelationLog.setPreTaskVersion(taskDefinitionLog.getVersion()); } processTaskRelationLog.setPostTaskVersion(taskDefinitionLogMap.get(processTaskRelationLog.getPostTaskCode()).getVersion()); } processTaskRelationLog.setCreateTime(now); processTaskRelationLog.setUpdateTime(now); processTaskRelationLog.setOperator(operator.getId()); processTaskRelationLog.setOperateTime(now); } List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); if (!processTaskRelationList.isEmpty()) { Set<Integer> processTaskRelationSet = processTaskRelationList.stream().map(ProcessTaskRelation::hashCode).collect(toSet()); Set<Integer> taskRelationSet = taskRelationList.stream().map(ProcessTaskRelationLog::hashCode).collect(toSet()); if (CollectionUtils.isEqualCollection(processTaskRelationSet, taskRelationSet)) { return Constants.EXIT_CODE_SUCCESS; } processTaskRelationMapper.deleteByCode(projectCode, processDefinitionCode); } int result = processTaskRelationMapper.batchInsert(taskRelationList); int resultLog = processTaskRelationLogMapper.batchInsert(taskRelationList); return (result & resultLog) > 0 ? 
Constants.EXIT_CODE_SUCCESS : Constants.EXIT_CODE_FAILURE; } public boolean isTaskOnline(long taskCode) { List<ProcessTaskRelation> processTaskRelationList = processTaskRelationMapper.queryByTaskCode(taskCode); if (!processTaskRelationList.isEmpty()) { Set<Long> processDefinitionCodes = processTaskRelationList .stream() .map(ProcessTaskRelation::getProcessDefinitionCode) .collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefineMapper.queryByCodes(processDefinitionCodes); // check process definition is already online for (ProcessDefinition processDefinition : processDefinitionList) { if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { return true; } } } return false; } /** * Generate the DAG Graph based on the process definition id * * @param processDefinition process definition * @return dag graph */ public DAG<String, TaskNode, TaskNodeRelation> genDagGraph(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskNode> taskNodeList = transformTask(processTaskRelations, Lists.newArrayList()); ProcessDag processDag = DagHelper.getProcessDag(taskNodeList, new ArrayList<>(processTaskRelations)); // Generate concrete Dag to be executed return DagHelper.buildDagGraph(processDag); } /** * generate DagData */ public DagData genDagData(ProcessDefinition processDefinition) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<TaskDefinitionLog> taskDefinitionLogList = genTaskDefineList(processTaskRelations); List<TaskDefinition> taskDefinitions = taskDefinitionLogList.stream() .map(taskDefinitionLog -> JSONUtils.parseObject(JSONUtils.toJsonString(taskDefinitionLog), TaskDefinition.class)) .collect(Collectors.toList()); return new DagData(processDefinition, processTaskRelations, taskDefinitions); } public List<TaskDefinitionLog> genTaskDefineList(List<ProcessTaskRelation> processTaskRelations) { Set<TaskDefinition> taskDefinitionSet = new HashSet<>(); for (ProcessTaskRelation processTaskRelation : processTaskRelations) { if (processTaskRelation.getPreTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPreTaskCode(), processTaskRelation.getPreTaskVersion())); } if (processTaskRelation.getPostTaskCode() > 0) { taskDefinitionSet.add(new TaskDefinition(processTaskRelation.getPostTaskCode(), processTaskRelation.getPostTaskVersion())); } } return taskDefinitionLogMapper.queryByTaskDefinitions(taskDefinitionSet); } /** * find task definition by code and version */ public TaskDefinition findTaskDefinition(long taskCode, int taskDefinitionVersion) { return taskDefinitionLogMapper.queryByDefinitionCodeAndVersion(taskCode, taskDefinitionVersion); } /** * find process task relation list by projectCode and processDefinitionCode */ public List<ProcessTaskRelation> findRelationByCode(long projectCode, long processDefinitionCode) { return processTaskRelationMapper.queryByProcessCode(projectCode, processDefinitionCode); } /** * add authorized resources * * @param ownResources own resources * @param userId userId */ private void addAuthorizedResources(List<Resource> ownResources, int userId) { List<Integer> relationResourceIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 7); List<Resource> relationResources = CollectionUtils.isNotEmpty(relationResourceIds) ? 
resourceMapper.queryResourceListById(relationResourceIds) : new ArrayList<>(); ownResources.addAll(relationResources); } /** * Use temporarily before refactoring taskNode */ public List<TaskNode> transformTask(List<ProcessTaskRelation> taskRelationList, List<TaskDefinitionLog> taskDefinitionLogs) { Map<Long, List<Long>> taskCodeMap = new HashMap<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { taskCodeMap.compute(processTaskRelation.getPostTaskCode(), (k, v) -> { if (v == null) { v = new ArrayList<>(); } if (processTaskRelation.getPreTaskCode() != 0L) { v.add(processTaskRelation.getPreTaskCode()); } return v; }); } if (CollectionUtils.isEmpty(taskDefinitionLogs)) { taskDefinitionLogs = genTaskDefineList(taskRelationList); } Map<Long, TaskDefinitionLog> taskDefinitionLogMap = taskDefinitionLogs.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); List<TaskNode> taskNodeList = new ArrayList<>(); for (Entry<Long, List<Long>> code : taskCodeMap.entrySet()) { TaskDefinitionLog taskDefinitionLog = taskDefinitionLogMap.get(code.getKey()); if (taskDefinitionLog != null) { TaskNode taskNode = new TaskNode(); taskNode.setCode(taskDefinitionLog.getCode()); taskNode.setVersion(taskDefinitionLog.getVersion()); taskNode.setName(taskDefinitionLog.getName()); taskNode.setDesc(taskDefinitionLog.getDescription()); taskNode.setType(taskDefinitionLog.getTaskType().toUpperCase()); taskNode.setRunFlag(taskDefinitionLog.getFlag() == Flag.YES ? Constants.FLOWNODE_RUN_FLAG_NORMAL : Constants.FLOWNODE_RUN_FLAG_FORBIDDEN); taskNode.setMaxRetryTimes(taskDefinitionLog.getFailRetryTimes()); taskNode.setRetryInterval(taskDefinitionLog.getFailRetryInterval()); Map<String, Object> taskParamsMap = taskNode.taskParamsToJsonObj(taskDefinitionLog.getTaskParams()); taskNode.setConditionResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.CONDITION_RESULT))); taskNode.setSwitchResult(JSONUtils.toJsonString(taskParamsMap.get(Constants.SWITCH_RESULT))); taskNode.setDependence(JSONUtils.toJsonString(taskParamsMap.get(Constants.DEPENDENCE))); taskParamsMap.remove(Constants.CONDITION_RESULT); taskParamsMap.remove(Constants.DEPENDENCE); taskNode.setParams(JSONUtils.toJsonString(taskParamsMap)); taskNode.setTaskInstancePriority(taskDefinitionLog.getTaskPriority()); taskNode.setWorkerGroup(taskDefinitionLog.getWorkerGroup()); taskNode.setEnvironmentCode(taskDefinitionLog.getEnvironmentCode()); taskNode.setTimeout(JSONUtils.toJsonString(new TaskTimeoutParameter(taskDefinitionLog.getTimeoutFlag() == TimeoutFlag.OPEN, taskDefinitionLog.getTimeoutNotifyStrategy(), taskDefinitionLog.getTimeout()))); taskNode.setDelayTime(taskDefinitionLog.getDelayTime()); taskNode.setPreTasks(JSONUtils.toJsonString(code.getValue().stream().map(taskDefinitionLogMap::get).map(TaskDefinition::getName).collect(Collectors.toList()))); taskNodeList.add(taskNode); } } return taskNodeList; } public Map<ProcessInstance, TaskInstance> notifyProcessList(int processId, int taskId) { HashMap<ProcessInstance, TaskInstance> processTaskMap = new HashMap<>(); //find sub tasks ProcessInstanceMap processInstanceMap = processInstanceMapMapper.queryBySubProcessId(processId); if (processInstanceMap == null) { return processTaskMap; } ProcessInstance fatherProcess = this.findProcessInstanceById(processInstanceMap.getParentProcessInstanceId()); TaskInstance fatherTask = this.findTaskInstanceById(processInstanceMap.getParentTaskInstanceId()); if (fatherProcess != null) { 
processTaskMap.put(fatherProcess, fatherTask); } return processTaskMap; } }
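To make the sub-command derivation in getSubCommandType above concrete, here is a minimal, self-contained sketch. The enum values and the helper name are stand-ins rather than the project's real classes; the point is only the fallback to the first entry of the parent's comma-separated history command when no child instance exists yet.

```java
// Illustrative sketch (not the project's actual classes) of how getSubCommandType
// derives the child command: reuse the parent's current command when the child
// already exists, otherwise fall back to the first history-command entry.
public class SubCommandTypeSketch {
    enum CommandType { START_PROCESS, REPEAT_RUNNING, RECOVER_TOLERANCE_FAULT_PROCESS }

    // historyCmd looks like "REPEAT_RUNNING,START_PROCESS"; childExists mirrors childInstance != null
    static CommandType subCommandType(CommandType parentCommand, String historyCmd, boolean childExists) {
        if (childExists) {
            return parentCommand; // child instance already created: keep the parent's command
        }
        return CommandType.valueOf(historyCmd.split(",")[0]); // else take the first history entry
    }

    public static void main(String[] args) {
        System.out.println(subCommandType(CommandType.REPEAT_RUNNING,
                "REPEAT_RUNNING,START_PROCESS", false)); // prints REPEAT_RUNNING
    }
}
```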
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,112
[Bug][MasterServer] StateEventResponseWorker will make cpu high
**Describe the bug** https://github.com/apache/dolphinscheduler/blob/93ef12366b422d1410c8de45e541ef4f97239a84/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/queue/StateEventResponseService.java#L104-L121 At line 116, when the eventQueue throws an `InterruptedException`, the exception is caught and the thread's interrupt flag is restored, which is the right pattern in most cases. Here, however, the loop keeps running, and once the thread is interrupted the blocking queue's take() throws `InterruptedException` again immediately on every iteration. So the code is effectively equivalent to the following ```java while (Stopper.isRunning()) { logger.warn("persist task error", e); } ``` **Which version of Dolphin Scheduler:** -[dev]
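A minimal, self-contained sketch of an interrupt-aware consumer loop that avoids the spin described above. It is illustrative only (simplified types, String events) and not necessarily the exact change the linked PR made; the key idea is that an `InterruptedException` ends the loop instead of letting the next take() throw again immediately.

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

// Sketch of an interrupt-aware worker loop: unlike the reported code, an
// InterruptedException breaks out of the loop instead of spinning the CPU.
public class InterruptAwareWorker {
    private final BlockingQueue<String> eventQueue = new LinkedBlockingQueue<>(5000);

    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            try {
                String event = eventQueue.take(); // blocks until an event arrives
                System.out.println("persist " + event);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore the flag for callers
                break;                              // leave the loop instead of retrying
            }
        }
    }

    public static void main(String[] args) throws Exception {
        InterruptAwareWorker w = new InterruptAwareWorker();
        Thread t = new Thread(w::run, "StateEventResponseWorker");
        t.start();
        w.eventQueue.put("event-1"); // consumed normally
        Thread.sleep(100);
        t.interrupt();               // worker exits cleanly instead of spinning
    }
}
```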
https://github.com/apache/dolphinscheduler/issues/6112
https://github.com/apache/dolphinscheduler/pull/6281
2d02f78ba7fbe0cf0e83db2b841eae42b8a8f855
ebb56b2620b7fb3c7dba70ba9342bddc909a2e75
"2021-09-06T12:27:59Z"
java
"2021-09-26T03:57:37Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/StateEventProcessor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.processor; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.StateEvent; import org.apache.dolphinscheduler.common.enums.StateEventType; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.remote.command.CommandType; import org.apache.dolphinscheduler.remote.command.StateEventChangeCommand; import org.apache.dolphinscheduler.remote.processor.NettyRequestProcessor; import org.apache.dolphinscheduler.server.master.processor.queue.StateEventResponseService; import org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import java.util.concurrent.ConcurrentHashMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.google.common.base.Preconditions; import io.netty.channel.Channel; /** * handle state event received from master/api */ public class StateEventProcessor implements NettyRequestProcessor { private final Logger logger = LoggerFactory.getLogger(StateEventProcessor.class); private StateEventResponseService stateEventResponseService; public StateEventProcessor() { stateEventResponseService = SpringApplicationContext.getBean(StateEventResponseService.class); } public void init(ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceExecMaps) { this.stateEventResponseService.init(processInstanceExecMaps); } @Override public void process(Channel channel, Command command) { Preconditions.checkArgument(CommandType.STATE_EVENT_REQUEST == command.getType(), String.format("invalid command type: %s", command.getType())); StateEventChangeCommand stateEventChangeCommand = JSONUtils.parseObject(command.getBody(), StateEventChangeCommand.class); StateEvent stateEvent = new StateEvent(); stateEvent.setExecutionStatus(ExecutionStatus.RUNNING_EXECUTION); stateEvent.setKey(stateEventChangeCommand.getKey()); stateEvent.setProcessInstanceId(stateEventChangeCommand.getDestProcessInstanceId()); stateEvent.setTaskInstanceId(stateEventChangeCommand.getDestTaskInstanceId()); StateEventType type = stateEvent.getTaskInstanceId() == 0 ? StateEventType.PROCESS_STATE_CHANGE : StateEventType.TASK_STATE_CHANGE; stateEvent.setType(type); logger.info("received command : {}", stateEvent.toString()); stateEventResponseService.addResponse(stateEvent); } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,112
[Bug][MasterServer] StateEventResponseWorker will make cpu high
**Describe the bug** https://github.com/apache/dolphinscheduler/blob/93ef12366b422d1410c8de45e541ef4f97239a84/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/queue/StateEventResponseService.java#L104-L121 At line 116, when the eventQueue throws an `InterruptedException`, the exception is caught and the thread's interrupt flag is restored, which is the right pattern in most cases. Here, however, the loop keeps running, and once the thread is interrupted the blocking queue's take() throws `InterruptedException` again immediately on every iteration. So the code is effectively equivalent to the following ```java while (Stopper.isRunning()) { logger.warn("persist task error", e); } ``` **Which version of Dolphin Scheduler:** -[dev]
https://github.com/apache/dolphinscheduler/issues/6112
https://github.com/apache/dolphinscheduler/pull/6281
2d02f78ba7fbe0cf0e83db2b841eae42b8a8f855
ebb56b2620b7fb3c7dba70ba9342bddc909a2e75
"2021-09-06T12:27:59Z"
java
"2021-09-26T03:57:37Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/processor/queue/StateEventResponseService.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.master.processor.queue; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.StateEvent; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.remote.command.StateEventResponseCommand; import org.apache.dolphinscheduler.server.master.runner.WorkflowExecuteThread; import java.util.ArrayList; import java.util.List; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; import javax.annotation.PostConstruct; import javax.annotation.PreDestroy; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.stereotype.Component; import io.netty.channel.Channel; /** * task manager */ @Component public class StateEventResponseService { /** * logger */ private final Logger logger = LoggerFactory.getLogger(StateEventResponseService.class); /** * attemptQueue */ private final BlockingQueue<StateEvent> eventQueue = new LinkedBlockingQueue<>(5000); /** * task response worker */ private Thread responseWorker; private ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceMapper; public void init(ConcurrentHashMap<Integer, WorkflowExecuteThread> processInstanceMapper) { if (this.processInstanceMapper == null) { this.processInstanceMapper = processInstanceMapper; } } @PostConstruct public void start() { this.responseWorker = new StateEventResponseWorker(); this.responseWorker.setName("StateEventResponseWorker"); this.responseWorker.start(); } @PreDestroy public void stop() { this.responseWorker.interrupt(); if (!eventQueue.isEmpty()) { List<StateEvent> remainEvents = new ArrayList<>(eventQueue.size()); eventQueue.drainTo(remainEvents); for (StateEvent event : remainEvents) { this.persist(event); } } } /** * put task to attemptQueue */ public void addResponse(StateEvent stateEvent) { try { eventQueue.put(stateEvent); } catch (InterruptedException e) { logger.error("put state event : {} error :{}", stateEvent, e); Thread.currentThread().interrupt(); } } /** * task worker thread */ class StateEventResponseWorker extends Thread { @Override public void run() { while (Stopper.isRunning()) { try { // if not task , blocking here StateEvent stateEvent = eventQueue.take(); persist(stateEvent); } catch (InterruptedException e) { logger.warn("persist task error", e); Thread.currentThread().interrupt(); } } logger.info("StateEventResponseWorker stopped"); } } private void writeResponse(StateEvent stateEvent, ExecutionStatus status) { Channel channel = stateEvent.getChannel(); if (channel != null) { StateEventResponseCommand command = new StateEventResponseCommand(status.getCode(), 
stateEvent.getKey()); channel.writeAndFlush(command.convert2Command()); } } private void persist(StateEvent stateEvent) { try { if (!this.processInstanceMapper.containsKey(stateEvent.getProcessInstanceId())) { writeResponse(stateEvent, ExecutionStatus.FAILURE); return; } WorkflowExecuteThread workflowExecuteThread = this.processInstanceMapper.get(stateEvent.getProcessInstanceId()); workflowExecuteThread.addStateEvent(stateEvent); writeResponse(stateEvent, ExecutionStatus.SUCCESS); } catch (Exception e) { logger.error("persist event queue error: {}", stateEvent, e); } } public BlockingQueue<StateEvent> getEventQueue() { return eventQueue; } }
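A note on the parameterized logging in persist() above: SLF4J substitutes arguments only into `{}` placeholders, so a call without one silently drops the argument (a trailing Throwable is still logged as a stack trace). A minimal self-contained illustration, assuming a standard SLF4J binding is on the classpath:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Demonstrates the SLF4J placeholder pitfall: without {} the event argument
// is never rendered in the message, even though the Throwable is logged.
public class PlaceholderDemo {
    private static final Logger logger = LoggerFactory.getLogger(PlaceholderDemo.class);

    public static void main(String[] args) {
        Exception e = new IllegalStateException("boom");
        logger.error("persist event queue error:", "event-42", e);    // "event-42" is dropped
        logger.error("persist event queue error: {}", "event-42", e); // placeholder renders it
    }
}
```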
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,352
[Bug] [ApiServer] override the old process definition when I use the copy workflow feature
### Search before asking - [X] I searched the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened branch: dev When I try to copy a workflow from process definition `python001`, it doesn't create a new process definition but updates the old one with a name like `python001_copy_20210926112401233`. ### What you expected to happen A new process definition should be created instead of overriding the old one. ### How to reproduce Just create a process definition and copy it. ### Anything else _No response_ ### Are you willing to submit PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
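Reading this together with the saveProcessDefine logic shown earlier (insert only when id == 0, otherwise updateById), the likely mechanism is that the copy kept the source definition's identity, so the save path took the update branch. A simplified, hypothetical sketch of the identity reset a copy would need; the types and the copyOf helper are illustrative, not the project's actual code:

```java
// Illustrative sketch (simplified types) of why a copied definition must get a
// fresh identity: the save path inserts only when id == 0 and otherwise updates
// by id, so a copy that keeps the old id overwrites the source definition.
public class CopyDefinitionSketch {
    static class ProcessDefinition {
        int id;     // database primary key; 0 means "not persisted yet"
        long code;  // stable business key
        String name;
    }

    // Hypothetical helper: reset identity so the save path takes the insert branch.
    static ProcessDefinition copyOf(ProcessDefinition source, long freshCode) {
        ProcessDefinition copy = new ProcessDefinition();
        copy.id = 0;                                   // forces insert instead of updateById
        copy.code = freshCode;                         // e.g. a newly generated snowflake id
        copy.name = source.name + "_copy_" + System.currentTimeMillis();
        return copy;
    }
}
```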
https://github.com/apache/dolphinscheduler/issues/6352
https://github.com/apache/dolphinscheduler/pull/6357
58eb20a11b7b47c53960f211f83549582fd53072
c56daede2f9812e88b2ea4a5f5ab4436782b171c
"2021-09-26T03:43:30Z"
java
"2021-09-26T08:36:54Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ProcessDefinitionServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_SUB_PROCESS_DEFINE_ID; import org.apache.dolphinscheduler.api.dto.DagDataSchedule; import org.apache.dolphinscheduler.api.dto.treeview.Instance; import org.apache.dolphinscheduler.api.dto.treeview.TreeViewDto; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.ProcessDefinitionService; import org.apache.dolphinscheduler.api.service.ProcessInstanceService; import org.apache.dolphinscheduler.api.service.ProjectService; import org.apache.dolphinscheduler.api.service.SchedulerService; import org.apache.dolphinscheduler.api.utils.CheckUtils; import org.apache.dolphinscheduler.api.utils.FileUtils; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.AuthorizationType; import org.apache.dolphinscheduler.common.enums.ReleaseState; import org.apache.dolphinscheduler.common.enums.UserType; import org.apache.dolphinscheduler.common.graph.DAG; import org.apache.dolphinscheduler.common.model.TaskNode; import org.apache.dolphinscheduler.common.model.TaskNodeRelation; import org.apache.dolphinscheduler.common.thread.Stopper; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils; import org.apache.dolphinscheduler.common.utils.SnowFlakeUtils.SnowFlakeException; import org.apache.dolphinscheduler.dao.entity.DagData; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessDefinitionLog; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelation; import org.apache.dolphinscheduler.dao.entity.ProcessTaskRelationLog; import org.apache.dolphinscheduler.dao.entity.Project; import org.apache.dolphinscheduler.dao.entity.Schedule; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskDefinitionLog; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationLogMapper; import 
org.apache.dolphinscheduler.dao.mapper.ProcessTaskRelationMapper; import org.apache.dolphinscheduler.dao.mapper.ProjectMapper; import org.apache.dolphinscheduler.dao.mapper.ScheduleMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionLogMapper; import org.apache.dolphinscheduler.dao.mapper.TaskDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.TaskInstanceMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.service.permission.PermissionCheck; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.commons.lang.StringUtils; import java.io.BufferedOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.MediaType; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.google.common.collect.Lists; /** * process definition service impl */ @Service public class ProcessDefinitionServiceImpl extends BaseServiceImpl implements ProcessDefinitionService { private static final Logger logger = LoggerFactory.getLogger(ProcessDefinitionServiceImpl.class); private static final String RELEASESTATE = "releaseState"; @Autowired private ProjectMapper projectMapper; @Autowired private ProjectService projectService; @Autowired private UserMapper userMapper; @Autowired private ProcessDefinitionLogMapper processDefinitionLogMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; @Autowired private ProcessInstanceService processInstanceService; @Autowired private TaskInstanceMapper taskInstanceMapper; @Autowired private ScheduleMapper scheduleMapper; @Autowired private ProcessService processService; @Autowired private ProcessTaskRelationMapper processTaskRelationMapper; @Autowired private ProcessTaskRelationLogMapper processTaskRelationLogMapper; @Autowired TaskDefinitionLogMapper taskDefinitionLogMapper; @Autowired private TaskDefinitionMapper taskDefinitionMapper; @Autowired private SchedulerService schedulerService; @Autowired private TenantMapper tenantMapper; /** * create process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return create result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> createProcessDefinition(User loginUser, long projectCode, String name, String description, String globalParams, String locations, 
int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson); if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) { return checkTaskDefinitions; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } long processDefinitionCode; try { processDefinitionCode = SnowFlakeUtils.getInstance().nextId(); } catch (SnowFlakeException e) { putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS); return result; } ProcessDefinition processDefinition = new ProcessDefinition(projectCode, name, processDefinitionCode, description, globalParams, locations, timeout, loginUser.getId(), tenantId); return createDagDefine(loginUser, taskRelationList, processDefinition, taskDefinitionLogs); } private Map<String, Object> createDagDefine(User loginUser, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs); if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) { logger.info("The task has not changed, so skip"); } if (saveTaskResult == Constants.DEFINITION_FAILURE) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR); } int insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); if (insertVersion == 0) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_PROCESS_DEFINITION_ERROR); } int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.CREATE_PROCESS_TASK_RELATION_ERROR); throw new ServiceException(Status.CREATE_PROCESS_TASK_RELATION_ERROR); } return result; } private Map<String, Object> checkTaskDefinitionList(List<TaskDefinitionLog> taskDefinitionLogs, String taskDefinitionJson) { Map<String, Object> result = new HashMap<>(); try { if (taskDefinitionLogs.isEmpty()) { logger.error("taskDefinitionJson invalid: {}", 
taskDefinitionJson); putMsg(result, Status.DATA_IS_NOT_VALID, taskDefinitionJson); return result; } for (TaskDefinitionLog taskDefinitionLog : taskDefinitionLogs) { if (!CheckUtils.checkTaskDefinitionParameters(taskDefinitionLog)) { logger.error("task definition {} parameter invalid", taskDefinitionLog.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskDefinitionLog.getName()); return result; } } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } private Map<String, Object> checkTaskRelationList(List<ProcessTaskRelationLog> taskRelationList, String taskRelationJson, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); try { if (taskRelationList == null || taskRelationList.isEmpty()) { logger.error("task relation list is null"); putMsg(result, Status.DATA_IS_NOT_VALID, taskRelationJson); return result; } List<ProcessTaskRelation> processTaskRelations = taskRelationList.stream() .map(processTaskRelationLog -> JSONUtils.parseObject(JSONUtils.toJsonString(processTaskRelationLog), ProcessTaskRelation.class)) .collect(Collectors.toList()); List<TaskNode> taskNodeList = processService.transformTask(processTaskRelations, taskDefinitionLogs); if (taskNodeList.size() != taskRelationList.size()) { Set<Long> postTaskCodes = taskRelationList.stream().map(ProcessTaskRelationLog::getPostTaskCode).collect(Collectors.toSet()); Set<Long> taskNodeCodes = taskNodeList.stream().map(TaskNode::getCode).collect(Collectors.toSet()); Collection<Long> codes = CollectionUtils.subtract(postTaskCodes, taskNodeCodes); if (CollectionUtils.isNotEmpty(codes)) { logger.error("the task code is not exit"); putMsg(result, Status.TASK_DEFINE_NOT_EXIST, StringUtils.join(codes, Constants.COMMA)); return result; } } if (graphHasCycle(taskNodeList)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the task relation json is normal for (ProcessTaskRelationLog processTaskRelationLog : taskRelationList) { if (processTaskRelationLog.getPostTaskCode() == 0) { logger.error("the post_task_code or post_task_version can't be zero"); putMsg(result, Status.CHECK_PROCESS_TASK_RELATION_ERROR); return result; } } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * query process definition list * * @param loginUser login user * @param projectCode project code * @return definition list */ @Override public Map<String, Object> queryProcessDefinitionList(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> resourceList = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = resourceList.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * query process definition list paging * * @param loginUser login user * @param projectCode project code * @param searchVal search value * @param userId user id * @param pageNo page number * @param 
pageSize page size * @return process definition page */ @Override public Result queryProcessDefinitionListPaging(User loginUser, long projectCode, String searchVal, Integer userId, Integer pageNo, Integer pageSize) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } Page<ProcessDefinition> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinition> processDefinitionIPage = processDefinitionMapper.queryDefineListPaging( page, searchVal, userId, project.getCode(), isAdmin(loginUser)); List<ProcessDefinition> records = processDefinitionIPage.getRecords(); for (ProcessDefinition pd : records) { ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(pd.getCode(), pd.getVersion()); User user = userMapper.selectById(processDefinitionLog.getOperator()); pd.setModifyBy(user.getUserName()); } processDefinitionIPage.setRecords(records); PageInfo<ProcessDefinition> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int) processDefinitionIPage.getTotal()); pageInfo.setTotalList(processDefinitionIPage.getRecords()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * query detail of process definition * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return process definition detail */ @Override public Map<String, Object> queryProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { Tenant tenant = tenantMapper.queryById(processDefinition.getTenantId()); if (tenant != null) { processDefinition.setTenantCode(tenant.getTenantCode()); } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } @Override public Map<String, Object> queryProcessDefinitionByName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, name); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, name); } else { DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData); putMsg(result, Status.SUCCESS); } return result; } /** * update process definition * * @param loginUser login user * @param projectCode project code * @param name process definition name * @param code process definition code * @param description description * @param globalParams global params * @param locations locations for nodes * @param timeout 
timeout * @param tenantCode tenantCode * @param taskRelationJson relation json for nodes * @param taskDefinitionJson taskDefinitionJson * @return update result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> updateProcessDefinition(User loginUser, long projectCode, String name, long code, String description, String globalParams, String locations, int timeout, String tenantCode, String taskRelationJson, String taskDefinitionJson) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<TaskDefinitionLog> taskDefinitionLogs = JSONUtils.toList(taskDefinitionJson, TaskDefinitionLog.class); Map<String, Object> checkTaskDefinitions = checkTaskDefinitionList(taskDefinitionLogs, taskDefinitionJson); if (checkTaskDefinitions.get(Constants.STATUS) != Status.SUCCESS) { return checkTaskDefinitions; } List<ProcessTaskRelationLog> taskRelationList = JSONUtils.toList(taskRelationJson, ProcessTaskRelationLog.class); Map<String, Object> checkRelationJson = checkTaskRelationList(taskRelationList, taskRelationJson, taskDefinitionLogs); if (checkRelationJson.get(Constants.STATUS) != Status.SUCCESS) { return checkRelationJson; } int tenantId = -1; if (!Constants.DEFAULT.equals(tenantCode)) { Tenant tenant = tenantMapper.queryByTenantCode(tenantCode); if (tenant == null) { putMsg(result, Status.TENANT_NOT_EXIST); return result; } tenantId = tenant.getId(); } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); // check process definition exists if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { // online can not permit edit putMsg(result, Status.PROCESS_DEFINE_NOT_ALLOWED_EDIT, processDefinition.getName()); return result; } if (!name.equals(processDefinition.getName())) { // check whether the new process define name exist ProcessDefinition definition = processDefinitionMapper.verifyByDefineName(project.getCode(), name); if (definition != null) { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name); return result; } } ProcessDefinition processDefinitionDeepCopy = JSONUtils.parseObject(JSONUtils.toJsonString(processDefinition), ProcessDefinition.class); processDefinition.set(projectCode, name, description, globalParams, locations, timeout, tenantId); return updateDagDefine(loginUser, taskRelationList, processDefinition, processDefinitionDeepCopy, taskDefinitionLogs); } private Map<String, Object> updateDagDefine(User loginUser, List<ProcessTaskRelationLog> taskRelationList, ProcessDefinition processDefinition, ProcessDefinition processDefinitionDeepCopy, List<TaskDefinitionLog> taskDefinitionLogs) { Map<String, Object> result = new HashMap<>(); int saveTaskResult = processService.saveTaskDefine(loginUser, processDefinition.getProjectCode(), taskDefinitionLogs); if (saveTaskResult == Constants.EXIT_CODE_SUCCESS) { logger.info("The task has not changed, so skip"); } if (saveTaskResult == Constants.DEFINITION_FAILURE) { putMsg(result, Status.UPDATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_TASK_DEFINITION_ERROR); } int insertVersion; if (processDefinition.equals(processDefinitionDeepCopy)) { insertVersion = processDefinitionDeepCopy.getVersion(); } else { 
processDefinition.setUpdateTime(new Date()); insertVersion = processService.saveProcessDefine(loginUser, processDefinition, true); } if (insertVersion == 0) { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR); } int insertResult = processService.saveTaskRelation(loginUser, processDefinition.getProjectCode(), processDefinition.getCode(), insertVersion, taskRelationList, taskDefinitionLogs); if (insertResult == Constants.EXIT_CODE_SUCCESS) { putMsg(result, Status.SUCCESS); result.put(Constants.DATA_LIST, processDefinition); } else { putMsg(result, Status.UPDATE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.UPDATE_PROCESS_DEFINITION_ERROR); } return result; } /** * verify process definition name unique * * @param loginUser login user * @param projectCode project code * @param name name * @return true if process definition name not exists, otherwise false */ @Override public Map<String, Object> verifyProcessDefinitionName(User loginUser, long projectCode, String name) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.verifyByDefineName(project.getCode(), name.trim()); if (processDefinition == null) { putMsg(result, Status.SUCCESS); } else { putMsg(result, Status.PROCESS_DEFINITION_NAME_EXIST, name.trim()); } return result; } /** * delete process definition by code * * @param loginUser login user * @param projectCode project code * @param code process definition code * @return delete result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteProcessDefinitionByCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } // Determine if the login user is the owner of the process definition if (loginUser.getId() != processDefinition.getUserId() && loginUser.getUserType() != UserType.ADMIN_USER) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } // check process definition is already online if (processDefinition.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.PROCESS_DEFINE_STATE_ONLINE, code); return result; } // check process instances is already running List<ProcessInstance> processInstances = processInstanceService.queryByProcessDefineCodeAndStatus(processDefinition.getCode(), Constants.NOT_TERMINATED_STATES); if (CollectionUtils.isNotEmpty(processInstances)) { putMsg(result, Status.DELETE_PROCESS_DEFINITION_BY_CODE_FAIL, processInstances.size()); return result; } // get the timing according to the process definition List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(code); if (!schedules.isEmpty() && schedules.size() > 1) { logger.warn("scheduler num is {},Greater than 1", schedules.size()); putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); return result; } else if (schedules.size() == 1) { 
Schedule schedule = schedules.get(0); if (schedule.getReleaseState() == ReleaseState.OFFLINE) { int delete = scheduleMapper.deleteById(schedule.getId()); if (delete == 0) { putMsg(result, Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR); throw new ServiceException(Status.DELETE_SCHEDULE_CRON_BY_ID_ERROR); } } else if (schedule.getReleaseState() == ReleaseState.ONLINE) { putMsg(result, Status.SCHEDULE_CRON_STATE_ONLINE, schedule.getId()); return result; } } int delete = processDefinitionMapper.deleteById(processDefinition.getId()); int deleteRelation = processTaskRelationMapper.deleteByCode(project.getCode(), processDefinition.getCode()); if (delete == 0 || deleteRelation == 0) { putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); } putMsg(result, Status.SUCCESS); return result; } /** * release process definition: online / offline * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param releaseState release state * @return release result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> releaseProcessDefinition(User loginUser, long projectCode, long code, ReleaseState releaseState) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } // check state if (null == releaseState) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); switch (releaseState) { case ONLINE: // To check resources whether they are already cancel authorized or deleted String resourceIds = processDefinition.getResourceIds(); if (StringUtils.isNotBlank(resourceIds)) { Integer[] resourceIdArray = Arrays.stream(resourceIds.split(Constants.COMMA)).map(Integer::parseInt).toArray(Integer[]::new); PermissionCheck<Integer> permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE_ID, processService, resourceIdArray, loginUser.getId(), logger); try { permissionCheck.checkPermission(); } catch (Exception e) { logger.error(e.getMessage(), e); putMsg(result, Status.RESOURCE_NOT_EXIST_OR_NO_PERMISSION, RELEASESTATE); return result; } } processDefinition.setReleaseState(releaseState); processDefinitionMapper.updateById(processDefinition); break; case OFFLINE: processDefinition.setReleaseState(releaseState); int updateProcess = processDefinitionMapper.updateById(processDefinition); List<Schedule> scheduleList = scheduleMapper.selectAllByProcessDefineArray( new long[]{processDefinition.getCode()} ); if (updateProcess > 0 && scheduleList.size() == 1) { Schedule schedule = scheduleList.get(0); logger.info("set schedule offline, project id: {}, schedule id: {}, process definition code: {}", project.getId(), schedule.getId(), code); // set status schedule.setReleaseState(ReleaseState.OFFLINE); int updateSchedule = scheduleMapper.updateById(schedule); if (updateSchedule == 0) { putMsg(result, Status.OFFLINE_SCHEDULE_ERROR); throw new ServiceException(Status.OFFLINE_SCHEDULE_ERROR); } schedulerService.deleteSchedule(project.getId(), schedule.getId()); } break; default: putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR, RELEASESTATE); return result; } putMsg(result, Status.SUCCESS); return result; } /** * batch export process definition 
by codes */ @Override public void batchExportProcessDefinitionByCodes(User loginUser, long projectCode, String codes, HttpServletResponse response) { if (StringUtils.isEmpty(codes)) { return; } Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); List<DagDataSchedule> dagDataSchedules = processDefinitionList.stream().map(this::exportProcessDagData).collect(Collectors.toList()); if (CollectionUtils.isNotEmpty(dagDataSchedules)) { downloadProcessDefinitionFile(response, dagDataSchedules); } } /** * download the process definition file */ private void downloadProcessDefinitionFile(HttpServletResponse response, List<DagDataSchedule> dagDataSchedules) { response.setContentType(MediaType.APPLICATION_JSON_UTF8_VALUE); BufferedOutputStream buff = null; ServletOutputStream out = null; try { out = response.getOutputStream(); buff = new BufferedOutputStream(out); buff.write(JSONUtils.toJsonString(dagDataSchedules).getBytes(StandardCharsets.UTF_8)); buff.flush(); buff.close(); } catch (IOException e) { logger.warn("export process fail", e); } finally { if (null != buff) { try { buff.close(); } catch (Exception e) { logger.warn("export process buffer not close", e); } } if (null != out) { try { out.close(); } catch (Exception e) { logger.warn("export process output stream not close", e); } } } } /** * get export process dag data * * @param processDefinition process definition * @return DagDataSchedule */ public DagDataSchedule exportProcessDagData(ProcessDefinition processDefinition) { List<Schedule> schedules = scheduleMapper.queryByProcessDefinitionCode(processDefinition.getCode()); DagDataSchedule dagDataSchedule = new DagDataSchedule(processService.genDagData(processDefinition)); if (!schedules.isEmpty()) { Schedule schedule = schedules.get(0); schedule.setReleaseState(ReleaseState.OFFLINE); dagDataSchedule.setSchedule(schedule); } return dagDataSchedule; } /** * import process definition * * @param loginUser login user * @param projectCode project code * @param file process metadata json file * @return import process */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> importProcessDefinition(User loginUser, long projectCode, MultipartFile file) { Map<String, Object> result = new HashMap<>(); String dagDataScheduleJson = FileUtils.file2String(file); List<DagDataSchedule> dagDataScheduleList = JSONUtils.toList(dagDataScheduleJson, DagDataSchedule.class); //check file content if (CollectionUtils.isEmpty(dagDataScheduleList)) { putMsg(result, Status.DATA_IS_NULL, "fileContent"); return result; } for (DagDataSchedule dagDataSchedule : dagDataScheduleList) { if (!checkAndImport(loginUser, projectCode, result, dagDataSchedule)) { return result; } } return result; } /** * check and import */ private boolean checkAndImport(User loginUser, long projectCode, Map<String, Object> result, DagDataSchedule dagDataSchedule) { if (!checkImportanceParams(dagDataSchedule, result)) { return false; } ProcessDefinition processDefinition = dagDataSchedule.getProcessDefinition(); //unique check Map<String, Object> checkResult = 
verifyProcessDefinitionName(loginUser, projectCode, processDefinition.getName()); if (Status.SUCCESS.equals(checkResult.get(Constants.STATUS))) { putMsg(result, Status.SUCCESS); } else { result.putAll(checkResult); return false; } String processDefinitionName = recursionProcessDefinitionName(projectCode, processDefinition.getName(), 1); processDefinition.setName(processDefinitionName + "_import_" + DateUtils.getCurrentTimeStamp()); processDefinition.setUserId(loginUser.getId()); try { processDefinition.setCode(SnowFlakeUtils.getInstance().nextId()); } catch (SnowFlakeException e) { putMsg(result, Status.CREATE_PROCESS_DEFINITION_ERROR); return false; } List<TaskDefinition> taskDefinitionList = dagDataSchedule.getTaskDefinitionList(); Map<Long, Long> taskCodeMap = new HashMap<>(); Date now = new Date(); List<TaskDefinitionLog> taskDefinitionLogList = new ArrayList<>(); for (TaskDefinition taskDefinition : taskDefinitionList) { TaskDefinitionLog taskDefinitionLog = new TaskDefinitionLog(taskDefinition); taskDefinitionLog.setName(taskDefinitionLog.getName() + "_import_" + DateUtils.getCurrentTimeStamp()); taskDefinitionLog.setProjectCode(projectCode); taskDefinitionLog.setUserId(loginUser.getId()); taskDefinitionLog.setVersion(Constants.VERSION_FIRST); taskDefinitionLog.setCreateTime(now); taskDefinitionLog.setUpdateTime(now); taskDefinitionLog.setOperator(loginUser.getId()); taskDefinitionLog.setOperateTime(now); try { long code = SnowFlakeUtils.getInstance().nextId(); taskCodeMap.put(taskDefinitionLog.getCode(), code); taskDefinitionLog.setCode(code); } catch (SnowFlakeException e) { logger.error("Task code get error, ", e); putMsg(result, Status.INTERNAL_SERVER_ERROR_ARGS, "Error generating task definition code"); return false; } taskDefinitionLogList.add(taskDefinitionLog); } int insert = taskDefinitionMapper.batchInsert(taskDefinitionLogList); int logInsert = taskDefinitionLogMapper.batchInsert(taskDefinitionLogList); if ((logInsert & insert) == 0) { putMsg(result, Status.CREATE_TASK_DEFINITION_ERROR); throw new ServiceException(Status.CREATE_TASK_DEFINITION_ERROR); } List<ProcessTaskRelation> taskRelationList = dagDataSchedule.getProcessTaskRelationList(); List<ProcessTaskRelationLog> taskRelationLogList = new ArrayList<>(); for (ProcessTaskRelation processTaskRelation : taskRelationList) { ProcessTaskRelationLog processTaskRelationLog = new ProcessTaskRelationLog(processTaskRelation); processTaskRelationLog.setPreTaskCode(taskCodeMap.get(processTaskRelationLog.getPreTaskCode())); processTaskRelationLog.setPostTaskCode(taskCodeMap.get(processTaskRelationLog.getPostTaskCode())); processTaskRelationLog.setPreTaskVersion(Constants.VERSION_FIRST); processTaskRelationLog.setPostTaskVersion(Constants.VERSION_FIRST); taskRelationLogList.add(processTaskRelationLog); } Map<String, Object> createDagResult = createDagDefine(loginUser, taskRelationLogList, processDefinition, Lists.newArrayList()); if (Status.SUCCESS.equals(createDagResult.get(Constants.STATUS))) { putMsg(createDagResult, Status.SUCCESS); } else { result.putAll(createDagResult); throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR); } Schedule schedule = dagDataSchedule.getSchedule(); if (null != schedule) { ProcessDefinition newProcessDefinition = processDefinitionMapper.queryByCode(processDefinition.getCode()); schedule.setProcessDefinitionCode(newProcessDefinition.getCode()); schedule.setUserId(loginUser.getId()); schedule.setCreateTime(now); schedule.setUpdateTime(now); int scheduleInsert = 
scheduleMapper.insert(schedule); if (0 == scheduleInsert) { putMsg(result, Status.IMPORT_PROCESS_DEFINE_ERROR); throw new ServiceException(Status.IMPORT_PROCESS_DEFINE_ERROR); } } return true; } /** * check importance params */ private boolean checkImportanceParams(DagDataSchedule dagDataSchedule, Map<String, Object> result) { if (dagDataSchedule.getProcessDefinition() == null) { putMsg(result, Status.DATA_IS_NULL, "ProcessDefinition"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getTaskDefinitionList())) { putMsg(result, Status.DATA_IS_NULL, "TaskDefinitionList"); return false; } if (CollectionUtils.isEmpty(dagDataSchedule.getProcessTaskRelationList())) { putMsg(result, Status.DATA_IS_NULL, "ProcessTaskRelationList"); return false; } return true; } private String recursionProcessDefinitionName(long projectCode, String processDefinitionName, int num) { ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(projectCode, processDefinitionName); if (processDefinition != null) { if (num > 1) { String str = processDefinitionName.substring(0, processDefinitionName.length() - 3); processDefinitionName = str + "(" + num + ")"; } else { processDefinitionName = processDefinition.getName() + "(" + num + ")"; } } else { return processDefinitionName; } return recursionProcessDefinitionName(projectCode, processDefinitionName, num + 1); } /** * check the process task relation json * * @param processTaskRelationJson process task relation json * @return check result code */ @Override public Map<String, Object> checkProcessNodeList(String processTaskRelationJson) { Map<String, Object> result = new HashMap<>(); try { if (processTaskRelationJson == null) { logger.error("process data is null"); putMsg(result, Status.DATA_IS_NOT_VALID, processTaskRelationJson); return result; } List<ProcessTaskRelation> taskRelationList = JSONUtils.toList(processTaskRelationJson, ProcessTaskRelation.class); // Check whether the task node is normal List<TaskNode> taskNodes = processService.transformTask(taskRelationList, Lists.newArrayList()); if (CollectionUtils.isEmpty(taskNodes)) { logger.error("process node info is empty"); putMsg(result, Status.PROCESS_DAG_IS_EMPTY); return result; } // check has cycle if (graphHasCycle(taskNodes)) { logger.error("process DAG has cycle"); putMsg(result, Status.PROCESS_NODE_HAS_CYCLE); return result; } // check whether the process definition json is normal for (TaskNode taskNode : taskNodes) { if (!CheckUtils.checkTaskNodeParameters(taskNode)) { logger.error("task node {} parameter invalid", taskNode.getName()); putMsg(result, Status.PROCESS_NODE_S_PARAMETER_INVALID, taskNode.getName()); return result; } // check extra params CheckUtils.checkOtherParams(taskNode.getExtras()); } putMsg(result, Status.SUCCESS); } catch (Exception e) { result.put(Constants.STATUS, Status.REQUEST_PARAMS_NOT_VALID_ERROR); result.put(Constants.MSG, e.getMessage()); } return result; } /** * get task node details based on process definition * * @param loginUser loginUser * @param projectCode project code * @param code process definition code * @return task node list */ @Override public Map<String, Object> getTaskNodeListByDefinitionCode(User loginUser, long projectCode, long code) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = 
processDefinitionMapper.queryByCode(code); if (processDefinition == null) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DagData dagData = processService.genDagData(processDefinition); result.put(Constants.DATA_LIST, dagData.getTaskDefinitionList()); putMsg(result, Status.SUCCESS); return result; } /** * get task node details map based on process definition * * @param loginUser loginUser * @param projectCode project code * @param codes define codes * @return task node list */ @Override public Map<String, Object> getNodeListMapByDefinitionCodes(User loginUser, long projectCode, String codes) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } Set<Long> defineCodeSet = Lists.newArrayList(codes.split(Constants.COMMA)).stream().map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(defineCodeSet); if (CollectionUtils.isEmpty(processDefinitionList)) { logger.info("process definition not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, codes); return result; } Map<Long, List<TaskDefinition>> taskNodeMap = new HashMap<>(); for (ProcessDefinition processDefinition : processDefinitionList) { DagData dagData = processService.genDagData(processDefinition); taskNodeMap.put(processDefinition.getCode(), dagData.getTaskDefinitionList()); } result.put(Constants.DATA_LIST, taskNodeMap); putMsg(result, Status.SUCCESS); return result; } /** * query process definition all by project code * * @param loginUser loginUser * @param projectCode project code * @return process definitions in the project */ @Override public Map<String, Object> queryAllProcessDefinitionByProjectCode(User loginUser, long projectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<ProcessDefinition> processDefinitions = processDefinitionMapper.queryAllDefinitionList(projectCode); List<DagData> dagDataList = processDefinitions.stream().map(processService::genDagData).collect(Collectors.toList()); result.put(Constants.DATA_LIST, dagDataList); putMsg(result, Status.SUCCESS); return result; } /** * Encapsulates the TreeView structure * * @param code process definition code * @param limit limit * @return tree view json data */ @Override public Map<String, Object> viewTree(long code, Integer limit) { Map<String, Object> result = new HashMap<>(); ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (null == processDefinition) { logger.info("process define not exists"); putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); return result; } DAG<String, TaskNode, TaskNodeRelation> dag = processService.genDagGraph(processDefinition); // nodes that is running Map<String, List<TreeViewDto>> runningNodeMap = new ConcurrentHashMap<>(); //nodes that is waiting to run Map<String, List<TreeViewDto>> waitingRunningNodeMap = new ConcurrentHashMap<>(); // List of process instances List<ProcessInstance> processInstanceList = processInstanceService.queryByProcessDefineCode(code, limit); processInstanceList.forEach(processInstance -> 
processInstance.setDuration(DateUtils.format2Duration(processInstance.getStartTime(), processInstance.getEndTime()))); List<TaskDefinitionLog> taskDefinitionList = processService.genTaskDefineList(processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode())); Map<Long, TaskDefinitionLog> taskDefinitionMap = taskDefinitionList.stream() .collect(Collectors.toMap(TaskDefinitionLog::getCode, taskDefinitionLog -> taskDefinitionLog)); if (limit > processInstanceList.size()) { limit = processInstanceList.size(); } TreeViewDto parentTreeViewDto = new TreeViewDto(); parentTreeViewDto.setName("DAG"); parentTreeViewDto.setType(""); parentTreeViewDto.setCode(0L); // Specify the process definition, because it is a TreeView for a process definition for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); Date endTime = processInstance.getEndTime() == null ? new Date() : processInstance.getEndTime(); parentTreeViewDto.getInstances().add(new Instance(processInstance.getId(), processInstance.getName(), processInstance.getProcessDefinitionCode(), "", processInstance.getState().toString(), processInstance.getStartTime(), endTime, processInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - processInstance.getStartTime().getTime()))); } List<TreeViewDto> parentTreeViewDtoList = new ArrayList<>(); parentTreeViewDtoList.add(parentTreeViewDto); // Here is the encapsulation task instance for (String startNode : dag.getBeginNode()) { runningNodeMap.put(startNode, parentTreeViewDtoList); } while (Stopper.isRunning()) { Set<String> postNodeList; Iterator<Map.Entry<String, List<TreeViewDto>>> iter = runningNodeMap.entrySet().iterator(); while (iter.hasNext()) { Map.Entry<String, List<TreeViewDto>> en = iter.next(); String nodeName = en.getKey(); parentTreeViewDtoList = en.getValue(); TreeViewDto treeViewDto = new TreeViewDto(); treeViewDto.setName(nodeName); TaskNode taskNode = dag.getNode(nodeName); treeViewDto.setType(taskNode.getType()); treeViewDto.setCode(taskNode.getCode()); //set treeViewDto instances for (int i = limit - 1; i >= 0; i--) { ProcessInstance processInstance = processInstanceList.get(i); TaskInstance taskInstance = taskInstanceMapper.queryByInstanceIdAndName(processInstance.getId(), nodeName); if (taskInstance == null) { treeViewDto.getInstances().add(new Instance(-1, "not running", 0, "null")); } else { Date startTime = taskInstance.getStartTime() == null ? new Date() : taskInstance.getStartTime(); Date endTime = taskInstance.getEndTime() == null ? 
new Date() : taskInstance.getEndTime(); int subProcessId = 0; // if process is sub process, the return sub id, or sub id=0 if (taskInstance.isSubProcess()) { TaskDefinition taskDefinition = taskDefinitionMap.get(taskInstance.getTaskCode()); subProcessId = Integer.parseInt(JSONUtils.parseObject( taskDefinition.getTaskParams()).path(CMD_PARAM_SUB_PROCESS_DEFINE_ID).asText()); } treeViewDto.getInstances().add(new Instance(taskInstance.getId(), taskInstance.getName(), taskInstance.getTaskCode(), taskInstance.getTaskType(), taskInstance.getState().toString(), taskInstance.getStartTime(), taskInstance.getEndTime(), taskInstance.getHost(), DateUtils.format2Readable(endTime.getTime() - startTime.getTime()), subProcessId)); } } for (TreeViewDto pTreeViewDto : parentTreeViewDtoList) { pTreeViewDto.getChildren().add(treeViewDto); } postNodeList = dag.getSubsequentNodes(nodeName); if (CollectionUtils.isNotEmpty(postNodeList)) { for (String nextNodeName : postNodeList) { List<TreeViewDto> treeViewDtoList = waitingRunningNodeMap.get(nextNodeName); if (CollectionUtils.isEmpty(treeViewDtoList)) { treeViewDtoList = new ArrayList<>(); } treeViewDtoList.add(treeViewDto); waitingRunningNodeMap.put(nextNodeName, treeViewDtoList); } } runningNodeMap.remove(nodeName); } if (waitingRunningNodeMap.size() == 0) { break; } else { runningNodeMap.putAll(waitingRunningNodeMap); waitingRunningNodeMap.clear(); } } result.put(Constants.DATA_LIST, parentTreeViewDto); result.put(Constants.STATUS, Status.SUCCESS); result.put(Constants.MSG, Status.SUCCESS.getMsg()); return result; } /** * whether the graph has a ring * * @param taskNodeResponseList task node response list * @return if graph has cycle flag */ private boolean graphHasCycle(List<TaskNode> taskNodeResponseList) { DAG<String, TaskNode, String> graph = new DAG<>(); // Fill the vertices for (TaskNode taskNodeResponse : taskNodeResponseList) { graph.addNode(taskNodeResponse.getName(), taskNodeResponse); } // Fill edge relations for (TaskNode taskNodeResponse : taskNodeResponseList) { List<String> preTasks = JSONUtils.toList(taskNodeResponse.getPreTasks(), String.class); if (CollectionUtils.isNotEmpty(preTasks)) { for (String preTask : preTasks) { if (!graph.addEdge(preTask, taskNodeResponse.getName())) { return true; } } } } return graph.hasCycle(); } /** * batch copy process definition * * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override public Map<String, Object> batchCopyProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, true); checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, true); return result; } /** * batch move process definition * * @param loginUser loginUser * @param projectCode projectCode * @param codes processDefinitionCodes * @param targetProjectCode targetProjectCode */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> batchMoveProcessDefinition(User loginUser, long projectCode, String codes, long targetProjectCode) { Map<String, Object> result = checkParams(loginUser, projectCode, codes, targetProjectCode); if 
(result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (projectCode == targetProjectCode) { return result; } List<String> failedProcessList = new ArrayList<>(); doBatchOperateProcessDefinition(loginUser, targetProjectCode, failedProcessList, codes, result, false); checkBatchOperateResult(projectCode, targetProjectCode, result, failedProcessList, false); return result; } private Map<String, Object> checkParams(User loginUser, long projectCode, String processDefinitionCodes, long targetProjectCode) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } if (StringUtils.isEmpty(processDefinitionCodes)) { putMsg(result, Status.PROCESS_DEFINITION_CODES_IS_EMPTY, processDefinitionCodes); return result; } if (projectCode != targetProjectCode) { Project targetProject = projectMapper.queryByCode(targetProjectCode); //check user access for project Map<String, Object> targetResult = projectService.checkProjectAndAuth(loginUser, targetProject, targetProjectCode); if (targetResult.get(Constants.STATUS) != Status.SUCCESS) { return targetResult; } } return result; } private void doBatchOperateProcessDefinition(User loginUser, long targetProjectCode, List<String> failedProcessList, String processDefinitionCodes, Map<String, Object> result, boolean isCopy) { Set<Long> definitionCodes = Arrays.stream(processDefinitionCodes.split(Constants.COMMA)).map(Long::parseLong).collect(Collectors.toSet()); List<ProcessDefinition> processDefinitionList = processDefinitionMapper.queryByCodes(definitionCodes); Set<Long> queryCodes = processDefinitionList.stream().map(ProcessDefinition::getCode).collect(Collectors.toSet()); // definitionCodes - queryCodes Set<Long> diffCode = definitionCodes.stream().filter(code -> !queryCodes.contains(code)).collect(Collectors.toSet()); diffCode.forEach(code -> failedProcessList.add(code + "[null]")); for (ProcessDefinition processDefinition : processDefinitionList) { List<ProcessTaskRelation> processTaskRelations = processTaskRelationMapper.queryByProcessCode(processDefinition.getProjectCode(), processDefinition.getCode()); List<ProcessTaskRelationLog> taskRelationList = processTaskRelations.stream().map(ProcessTaskRelationLog::new).collect(Collectors.toList()); processDefinition.setProjectCode(targetProjectCode); if (isCopy) { processDefinition.setName(processDefinition.getName() + "_copy_" + DateUtils.getCurrentTimeStamp()); try { result.putAll(createDagDefine(loginUser, taskRelationList, processDefinition, Lists.newArrayList())); } catch (Exception e) { putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.COPY_PROCESS_DEFINITION_ERROR); } } else { try { result.putAll(updateDagDefine(loginUser, taskRelationList, processDefinition, null, Lists.newArrayList())); } catch (Exception e) { putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR); throw new ServiceException(Status.MOVE_PROCESS_DEFINITION_ERROR); } } if (result.get(Constants.STATUS) != Status.SUCCESS) { failedProcessList.add(processDefinition.getCode() + "[" + processDefinition.getName() + "]"); } } } /** * switch the defined process definition version * * @param loginUser login user * @param projectCode project code * @param code process definition code * @param version the version user want to switch * @return switch process definition version result code */ @Override 
@Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> switchProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (Objects.isNull(processDefinition)) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_ERROR, code); return result; } ProcessDefinitionLog processDefinitionLog = processDefinitionLogMapper.queryByDefinitionCodeAndVersion(code, version); if (Objects.isNull(processDefinitionLog)) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_NOT_EXIST_PROCESS_DEFINITION_VERSION_ERROR, processDefinition.getCode(), version); return result; } int switchVersion = processService.switchVersion(processDefinition, processDefinitionLog); if (switchVersion > 0) { putMsg(result, Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR); throw new ServiceException(Status.SWITCH_PROCESS_DEFINITION_VERSION_ERROR); } putMsg(result, Status.SUCCESS); return result; } /** * check batch operate result * * @param srcProjectCode srcProjectCode * @param targetProjectCode targetProjectCode * @param result result * @param failedProcessList failedProcessList * @param isCopy isCopy */ private void checkBatchOperateResult(long srcProjectCode, long targetProjectCode, Map<String, Object> result, List<String> failedProcessList, boolean isCopy) { if (!failedProcessList.isEmpty()) { if (isCopy) { putMsg(result, Status.COPY_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } else { putMsg(result, Status.MOVE_PROCESS_DEFINITION_ERROR, srcProjectCode, targetProjectCode, String.join(",", failedProcessList)); } } else { putMsg(result, Status.SUCCESS); } } /** * query the pagination versions info by one certain process definition code * * @param loginUser login user info to check auth * @param projectCode project code * @param pageNo page number * @param pageSize page size * @param code process definition code * @return the pagination process definition versions info of the certain process definition */ @Override public Result queryProcessDefinitionVersions(User loginUser, long projectCode, int pageNo, int pageSize, long code) { Result result = new Result(); Project project = projectMapper.queryByCode(projectCode); // check user access for project Map<String, Object> checkResult = projectService.checkProjectAndAuth(loginUser, project, projectCode); Status resultStatus = (Status) checkResult.get(Constants.STATUS); if (resultStatus != Status.SUCCESS) { putMsg(result, resultStatus); return result; } PageInfo<ProcessDefinitionLog> pageInfo = new PageInfo<>(pageNo, pageSize); Page<ProcessDefinitionLog> page = new Page<>(pageNo, pageSize); IPage<ProcessDefinitionLog> processDefinitionVersionsPaging = processDefinitionLogMapper.queryProcessDefinitionVersionsPaging(page, code); List<ProcessDefinitionLog> processDefinitionLogs = processDefinitionVersionsPaging.getRecords(); pageInfo.setTotalList(processDefinitionLogs); pageInfo.setTotal((int) processDefinitionVersionsPaging.getTotal()); result.setData(pageInfo); putMsg(result, Status.SUCCESS); return result; } /** * delete one certain process definition by version number and process definition code * * @param 
loginUser login user info to check auth * @param projectCode project code * @param code process definition code * @param version version number * @return delete result code */ @Override @Transactional(rollbackFor = RuntimeException.class) public Map<String, Object> deleteProcessDefinitionVersion(User loginUser, long projectCode, long code, int version) { Project project = projectMapper.queryByCode(projectCode); //check user access for project Map<String, Object> result = projectService.checkProjectAndAuth(loginUser, project, projectCode); if (result.get(Constants.STATUS) != Status.SUCCESS) { return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByCode(code); if (processDefinition == null) { putMsg(result, Status.PROCESS_DEFINE_NOT_EXIST, code); } else { int deleteLog = processDefinitionLogMapper.deleteByProcessDefinitionCodeAndVersion(code, version); int deleteRelationLog = processTaskRelationLogMapper.deleteByCode(processDefinition.getCode(), processDefinition.getVersion()); if (deleteLog == 0 || deleteRelationLog == 0) { putMsg(result, Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); throw new ServiceException(Status.DELETE_PROCESS_DEFINE_BY_CODE_ERROR); } putMsg(result, Status.SUCCESS); } return result; } }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,366
[Bug] [API][service] checkResourceExists function uses the magic number userId '0'
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened when the resource actually exists, but the userId is not '0', the existence check will not take effect. ### What you expected to happen the resources should be operated on by the loginUser, so the userId should be the loginUser's id ### How to reproduce the resources should be operated on by the loginUser, so the userId should be the loginUser's id ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
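For context on the report, the file content that follows shows the pattern in question: `createDirectory`, `createResource`, and `updateResource` each call `checkResourceExists(fullName, 0, type.ordinal())` with a literal userId of `0`. Below is a minimal sketch of the direction the reporter suggests, assuming the surrounding `ResourcesServiceImpl` fields (`resourcesMapper`, `logger`) and the existing `existResource(fullName, userId, type)` mapper query; it is an illustration of the idea, not the actual diff merged in the linked PR.

```java
// Hedged sketch only — not the merged fix from PR #6368.
// Unchanged helper: the check is scoped by whatever userId the caller passes in.
private boolean checkResourceExists(String fullName, int userId, int type) {
    Boolean existResource = resourcesMapper.existResource(fullName, userId, type);
    return existResource == Boolean.TRUE;
}

// Call site in createDirectory, before:
//   if (checkResourceExists(fullName, 0, type.ordinal())) { ... }
// Call site after (sketch) — pass the login user's id instead of the magic 0:
if (checkResourceExists(fullName, loginUser.getId(), type.ordinal())) {
    logger.error("resource directory {} has exist, can't recreate", fullName);
    putMsg(result, Status.RESOURCE_EXIST);
    return result;
}
```

With the login user's id passed through, the duplicate check runs against resources the operating user actually owns, which matches the reporter's expectation that resources are operated on by the loginUser.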
https://github.com/apache/dolphinscheduler/issues/6366
https://github.com/apache/dolphinscheduler/pull/6368
545173faa31e477f9487e98611f1951cf43efa2b
7413b578410a53c4839920edfdf98ac89344b959
"2021-09-27T02:48:04Z"
java
"2021-09-27T08:58:26Z"
dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/impl/ResourcesServiceImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.api.service.impl; import static org.apache.dolphinscheduler.common.Constants.ALIAS; import static org.apache.dolphinscheduler.common.Constants.CONTENT; import static org.apache.dolphinscheduler.common.Constants.JAR; import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent; import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter; import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor; import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor; import org.apache.dolphinscheduler.api.enums.Status; import org.apache.dolphinscheduler.api.exceptions.ServiceException; import org.apache.dolphinscheduler.api.service.ResourcesService; import org.apache.dolphinscheduler.api.utils.PageInfo; import org.apache.dolphinscheduler.api.utils.RegexUtils; import org.apache.dolphinscheduler.api.utils.Result; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.ProgramType; import org.apache.dolphinscheduler.common.enums.ResourceType; import org.apache.dolphinscheduler.common.utils.CollectionUtils; import org.apache.dolphinscheduler.common.utils.FileUtils; import org.apache.dolphinscheduler.common.utils.HadoopUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.PropertyUtils; import org.apache.dolphinscheduler.dao.entity.Resource; import org.apache.dolphinscheduler.dao.entity.ResourcesUser; import org.apache.dolphinscheduler.dao.entity.Tenant; import org.apache.dolphinscheduler.dao.entity.UdfFunc; import org.apache.dolphinscheduler.dao.entity.User; import org.apache.dolphinscheduler.dao.mapper.ProcessDefinitionMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceMapper; import org.apache.dolphinscheduler.dao.mapper.ResourceUserMapper; import org.apache.dolphinscheduler.dao.mapper.TenantMapper; import org.apache.dolphinscheduler.dao.mapper.UdfFuncMapper; import org.apache.dolphinscheduler.dao.mapper.UserMapper; import org.apache.dolphinscheduler.dao.utils.ResourceProcessDefinitionUtils; import org.apache.commons.beanutils.BeanMap; import org.apache.commons.lang.StringUtils; import java.io.IOException; import java.text.MessageFormat; import java.util.ArrayList; import java.util.Arrays; import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.regex.Matcher; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.dao.DuplicateKeyException; import 
org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import org.springframework.web.multipart.MultipartFile; import com.baomidou.mybatisplus.core.metadata.IPage; import com.baomidou.mybatisplus.extension.plugins.pagination.Page; import com.fasterxml.jackson.databind.SerializationFeature; /** * resources service impl */ @Service public class ResourcesServiceImpl extends BaseServiceImpl implements ResourcesService { private static final Logger logger = LoggerFactory.getLogger(ResourcesServiceImpl.class); @Autowired private ResourceMapper resourcesMapper; @Autowired private UdfFuncMapper udfFunctionMapper; @Autowired private TenantMapper tenantMapper; @Autowired private UserMapper userMapper; @Autowired private ResourceUserMapper resourceUserMapper; @Autowired private ProcessDefinitionMapper processDefinitionMapper; /** * create directory * * @param loginUser login user * @param name alias * @param description description * @param type type * @param pid parent id * @param currentDir current directory * @return create directory result */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> createDirectory(User loginUser, String name, String description, ResourceType type, int pid, String currentDir) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name); result = verifyResource(loginUser, type, fullName, pid); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } if (checkResourceExists(fullName, 0, type.ordinal())) { logger.error("resource directory {} has exist, can't recreate", fullName); putMsg(result, Status.RESOURCE_EXIST); return result; } Date now = new Date(); Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now); try { resourcesMapper.insert(resource); putMsg(result, Status.SUCCESS); Map<Object, Object> dataMap = new BeanMap(resource); Map<String, Object> resultMap = new HashMap<>(); for (Map.Entry<Object, Object> entry: dataMap.entrySet()) { if (!"class".equalsIgnoreCase(entry.getKey().toString())) { resultMap.put(entry.getKey().toString(), entry.getValue()); } } result.setData(resultMap); } catch (DuplicateKeyException e) { logger.error("resource directory {} has exist, can't recreate", fullName); putMsg(result, Status.RESOURCE_EXIST); return result; } catch (Exception e) { logger.error("resource already exists, can't recreate ", e); throw new ServiceException("resource already exists, can't recreate"); } //create directory in hdfs createDirectory(loginUser,fullName,type,result); return result; } /** * create resource * * @param loginUser login user * @param name alias * @param desc description * @param file file * @param type type * @param pid parent id * @param currentDir current directory * @return create result code */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> createResource(User loginUser, String name, String desc, ResourceType type, MultipartFile file, int pid, String currentDir) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } result = verifyPid(loginUser, pid); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } result = verifyFile(name, type, file); if 
(!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // check resource name exists String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name); if (checkResourceExists(fullName, 0, type.ordinal())) { logger.error("resource {} has exist, can't recreate", RegexUtils.escapeNRT(name)); putMsg(result, Status.RESOURCE_EXIST); return result; } Date now = new Date(); Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now); try { resourcesMapper.insert(resource); putMsg(result, Status.SUCCESS); Map<Object, Object> dataMap = new BeanMap(resource); Map<String, Object> resultMap = new HashMap<>(); for (Map.Entry<Object, Object> entry: dataMap.entrySet()) { if (!"class".equalsIgnoreCase(entry.getKey().toString())) { resultMap.put(entry.getKey().toString(), entry.getValue()); } } result.setData(resultMap); } catch (Exception e) { logger.error("resource already exists, can't recreate ", e); throw new ServiceException("resource already exists, can't recreate"); } // fail upload if (!upload(loginUser, fullName, file, type)) { logger.error("upload resource: {} file: {} failed.", RegexUtils.escapeNRT(name), RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.HDFS_OPERATION_ERROR); throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename())); } return result; } /** * check resource is exists * * @param fullName fullName * @param userId user id * @param type type * @return true if resource exists */ private boolean checkResourceExists(String fullName, int userId, int type) { Boolean existResource = resourcesMapper.existResource(fullName, userId, type); return existResource == Boolean.TRUE; } /** * update resource * @param loginUser login user * @param resourceId resource id * @param name name * @param desc description * @param type resource type * @param file resource file * @return update result code */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> updateResource(User loginUser, int resourceId, String name, String desc, ResourceType type, MultipartFile file) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } if (!hasPerm(loginUser, resource.getUserId())) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } if (file == null && name.equals(resource.getAlias()) && desc.equals(resource.getDescription())) { putMsg(result, Status.SUCCESS); return result; } //check resource already exists String originFullName = resource.getFullName(); String originResourceName = resource.getAlias(); String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/") + 1),name); if (!originResourceName.equals(name) && checkResourceExists(fullName, 0, type.ordinal())) { logger.error("resource {} already exists, can't recreate", name); putMsg(result, Status.RESOURCE_EXIST); return result; } result = verifyFile(name, type, file); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // query tenant by user id String tenantCode = getTenantCode(resource.getUserId(),result); if (StringUtils.isEmpty(tenantCode)) { return result; } // verify whether the resource exists in storage // get 
the path of origin file in storage String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName); try { if (!HadoopUtils.getInstance().exists(originHdfsFileName)) { logger.error("{} not exist", originHdfsFileName); putMsg(result,Status.RESOURCE_NOT_EXIST); return result; } } catch (IOException e) { logger.error(e.getMessage(),e); throw new ServiceException(Status.HDFS_OPERATION_ERROR); } if (!resource.isDirectory()) { //get the origin file suffix String originSuffix = FileUtils.suffix(originFullName); String suffix = FileUtils.suffix(fullName); boolean suffixIsChanged = false; if (StringUtils.isBlank(suffix) && StringUtils.isNotBlank(originSuffix)) { suffixIsChanged = true; } if (StringUtils.isNotBlank(suffix) && !suffix.equals(originSuffix)) { suffixIsChanged = true; } //verify whether suffix is changed if (suffixIsChanged) { //need verify whether this resource is authorized to other users Map<String, Object> columnMap = new HashMap<>(); columnMap.put("resources_id", resourceId); List<ResourcesUser> resourcesUsers = resourceUserMapper.selectByMap(columnMap); if (CollectionUtils.isNotEmpty(resourcesUsers)) { List<Integer> userIds = resourcesUsers.stream().map(ResourcesUser::getUserId).collect(Collectors.toList()); List<User> users = userMapper.selectBatchIds(userIds); String userNames = users.stream().map(User::getUserName).collect(Collectors.toList()).toString(); logger.error("resource is authorized to user {},suffix not allowed to be modified", userNames); putMsg(result,Status.RESOURCE_IS_AUTHORIZED,userNames); return result; } } } // updateResource data Date now = new Date(); resource.setAlias(name); resource.setFileName(name); resource.setFullName(fullName); resource.setDescription(desc); resource.setUpdateTime(now); if (file != null) { resource.setSize(file.getSize()); } try { resourcesMapper.updateById(resource); if (resource.isDirectory()) { List<Integer> childrenResource = listAllChildren(resource,false); if (CollectionUtils.isNotEmpty(childrenResource)) { String matcherFullName = Matcher.quoteReplacement(fullName); List<Resource> childResourceList; Integer[] childResIdArray = childrenResource.toArray(new Integer[childrenResource.size()]); List<Resource> resourceList = resourcesMapper.listResourceByIds(childResIdArray); childResourceList = resourceList.stream().map(t -> { t.setFullName(t.getFullName().replaceFirst(originFullName, matcherFullName)); t.setUpdateTime(now); return t; }).collect(Collectors.toList()); resourcesMapper.batchUpdateResource(childResourceList); if (ResourceType.UDF.equals(resource.getType())) { List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(childResIdArray); if (CollectionUtils.isNotEmpty(udfFuncs)) { udfFuncs = udfFuncs.stream().map(t -> { t.setResourceName(t.getResourceName().replaceFirst(originFullName, matcherFullName)); t.setUpdateTime(now); return t; }).collect(Collectors.toList()); udfFunctionMapper.batchUpdateUdfFunc(udfFuncs); } } } } else if (ResourceType.UDF.equals(resource.getType())) { List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(new Integer[]{resourceId}); if (CollectionUtils.isNotEmpty(udfFuncs)) { udfFuncs = udfFuncs.stream().map(t -> { t.setResourceName(fullName); t.setUpdateTime(now); return t; }).collect(Collectors.toList()); udfFunctionMapper.batchUpdateUdfFunc(udfFuncs); } } putMsg(result, Status.SUCCESS); Map<Object, Object> dataMap = new BeanMap(resource); Map<String, Object> resultMap = new HashMap<>(); for (Map.Entry<Object, Object> entry: dataMap.entrySet()) { 
if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) { resultMap.put(entry.getKey().toString(), entry.getValue()); } } result.setData(resultMap); } catch (Exception e) { logger.error(Status.UPDATE_RESOURCE_ERROR.getMsg(), e); throw new ServiceException(Status.UPDATE_RESOURCE_ERROR); } // if name unchanged, return directly without moving on HDFS if (originResourceName.equals(name) && file == null) { return result; } if (file != null) { // fail upload if (!upload(loginUser, fullName, file, type)) { logger.error("upload resource: {} file: {} failed.", name, RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.HDFS_OPERATION_ERROR); throw new ServiceException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename())); } if (!fullName.equals(originFullName)) { try { HadoopUtils.getInstance().delete(originHdfsFileName,false); } catch (IOException e) { logger.error(e.getMessage(),e); throw new ServiceException(String.format("delete resource: %s failed.", originFullName)); } } return result; } // get the path of dest file in hdfs String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName); try { logger.info("start hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName); HadoopUtils.getInstance().copy(originHdfsFileName, destHdfsFileName, true, true); } catch (Exception e) { logger.error(MessageFormat.format("hdfs copy {0} -> {1} fail", originHdfsFileName, destHdfsFileName), e); putMsg(result,Status.HDFS_COPY_FAIL); throw new ServiceException(Status.HDFS_COPY_FAIL); } return result; } private Result<Object> verifyFile(String name, ResourceType type, MultipartFile file) { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); if (file != null) { // file is empty if (file.isEmpty()) { logger.error("file is empty: {}", RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.RESOURCE_FILE_IS_EMPTY); return result; } // file suffix String fileSuffix = FileUtils.suffix(file.getOriginalFilename()); String nameSuffix = FileUtils.suffix(name); // determine file suffix if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) { // rename file suffix and original suffix must be consistent logger.error("rename file suffix and original suffix must be consistent: {}", RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.RESOURCE_SUFFIX_FORBID_CHANGE); return result; } //If resource type is UDF, only jar packages are allowed to be uploaded, and the suffix must be .jar if (Constants.UDF.equals(type.name()) && !JAR.equalsIgnoreCase(fileSuffix)) { logger.error(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg()); putMsg(result, Status.UDF_RESOURCE_SUFFIX_NOT_JAR); return result; } if (file.getSize() > Constants.MAX_FILE_SIZE) { logger.error("file size is too large: {}", RegexUtils.escapeNRT(file.getOriginalFilename())); putMsg(result, Status.RESOURCE_SIZE_EXCEED_LIMIT); return result; } } return result; } /** * query resources list paging * * @param loginUser login user * @param type resource type * @param searchVal search value * @param pageNo page number * @param pageSize page size * @return resource list page */ @Override public Result queryResourceListPaging(User loginUser, int directoryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) { Result result = new Result(); Page<Resource> page = new Page<>(pageNo, pageSize); int userId = loginUser.getId(); if (isAdmin(loginUser)) { userId = 0; } if (directoryId != -1) { 
Resource directory = resourcesMapper.selectById(directoryId); if (directory == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } } List<Integer> resourcesIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, 0); IPage<Resource> resourceIPage = resourcesMapper.queryResourcePaging(page, userId, directoryId, type.ordinal(), searchVal,resourcesIds); PageInfo<Resource> pageInfo = new PageInfo<>(pageNo, pageSize); pageInfo.setTotal((int)resourceIPage.getTotal()); pageInfo.setTotalList(resourceIPage.getRecords()); result.setData(pageInfo); putMsg(result,Status.SUCCESS); return result; } /** * create directory * @param loginUser login user * @param fullName full name * @param type resource type * @param result Result */ private void createDirectory(User loginUser,String fullName,ResourceType type,Result<Object> result) { String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode(); String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName); String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode); try { if (!HadoopUtils.getInstance().exists(resourceRootPath)) { createTenantDirIfNotExists(tenantCode); } if (!HadoopUtils.getInstance().mkdir(directoryName)) { logger.error("create resource directory {} of hdfs failed",directoryName); putMsg(result,Status.HDFS_OPERATION_ERROR); throw new ServiceException(String.format("create resource directory: %s failed.", directoryName)); } } catch (Exception e) { logger.error("create resource directory {} of hdfs failed",directoryName); putMsg(result,Status.HDFS_OPERATION_ERROR); throw new ServiceException(String.format("create resource directory: %s failed.", directoryName)); } } /** * upload file to hdfs * * @param loginUser login user * @param fullName full name * @param file file */ private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) { // save to local String fileSuffix = FileUtils.suffix(file.getOriginalFilename()); String nameSuffix = FileUtils.suffix(fullName); // determine file suffix if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) { return false; } // query tenant String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode(); // random file name String localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString()); // save file to hdfs, and delete original file String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName); String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode); try { // if tenant dir not exists if (!HadoopUtils.getInstance().exists(resourcePath)) { createTenantDirIfNotExists(tenantCode); } org.apache.dolphinscheduler.api.utils.FileUtils.copyFile(file, localFilename); HadoopUtils.getInstance().copyLocalToHdfs(localFilename, hdfsFilename, true, true); } catch (Exception e) { try { FileUtils.deleteFile(localFilename); } catch (IOException ex) { logger.error("delete local tmp file:{} error", localFilename, ex); } logger.error(e.getMessage(), e); return false; } return true; } /** * query resource list * * @param loginUser login user * @param type resource type * @return resource list */ @Override public Map<String, Object> queryResourceList(User loginUser, ResourceType type) { Map<String, Object> result = new HashMap<>(); List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type); Visitor resourceTreeVisitor = new ResourceTreeVisitor(allResourceList); result.put(Constants.DATA_LIST, 
resourceTreeVisitor.visit().getChildren()); putMsg(result, Status.SUCCESS); return result; } /** * query resource list by program type * * @param loginUser login user * @param type resource type * @return resource list */ @Override public Map<String, Object> queryResourceByProgramType(User loginUser, ResourceType type, ProgramType programType) { Map<String, Object> result = new HashMap<>(); List<Resource> allResourceList = queryAuthoredResourceList(loginUser, type); String suffix = ".jar"; if (programType != null) { switch (programType) { case JAVA: case SCALA: break; case PYTHON: suffix = ".py"; break; default: } } List<Resource> resources = new ResourceFilter(suffix, new ArrayList<>(allResourceList)).filter(); Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources); result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren()); putMsg(result, Status.SUCCESS); return result; } /** * delete resource * * @param loginUser login user * @param resourceId resource id * @return delete result code * @throws IOException exception */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> delete(User loginUser, int resourceId) throws IOException { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // get resource by id Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } if (!hasPerm(loginUser, resource.getUserId())) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } String tenantCode = getTenantCode(resource.getUserId(),result); if (StringUtils.isEmpty(tenantCode)) { return result; } // get all resource id of process definitions those is released List<Map<String, Object>> list = processDefinitionMapper.listResources(); Map<Integer, Set<Long>> resourceProcessMap = ResourceProcessDefinitionUtils.getResourceProcessDefinitionMap(list); Set<Integer> resourceIdSet = resourceProcessMap.keySet(); // get all children of the resource List<Integer> allChildren = listAllChildren(resource,true); Integer[] needDeleteResourceIdArray = allChildren.toArray(new Integer[allChildren.size()]); //if resource type is UDF,need check whether it is bound by UDF function if (resource.getType() == (ResourceType.UDF)) { List<UdfFunc> udfFuncs = udfFunctionMapper.listUdfByResourceId(needDeleteResourceIdArray); if (CollectionUtils.isNotEmpty(udfFuncs)) { logger.error("can't be deleted,because it is bound by UDF functions:{}", udfFuncs); putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName()); return result; } } if (resourceIdSet.contains(resource.getPid())) { logger.error("can't be deleted,because it is used of process definition"); putMsg(result, Status.RESOURCE_IS_USED); return result; } resourceIdSet.retainAll(allChildren); if (CollectionUtils.isNotEmpty(resourceIdSet)) { logger.error("can't be deleted,because it is used of process definition"); for (Integer resId : resourceIdSet) { logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId)); } putMsg(result, Status.RESOURCE_IS_USED); return result; } // get hdfs file by type String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName()); //delete data in database resourcesMapper.deleteIds(needDeleteResourceIdArray); resourceUserMapper.deleteResourceUserArray(0, needDeleteResourceIdArray); //delete file on hdfs 
HadoopUtils.getInstance().delete(hdfsFilename, true); putMsg(result, Status.SUCCESS); return result; } /** * verify resource by name and type * @param loginUser login user * @param fullName resource full name * @param type resource type * @return true if the resource name not exists, otherwise return false */ @Override public Result<Object> verifyResourceName(String fullName, ResourceType type, User loginUser) { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); if (checkResourceExists(fullName, 0, type.ordinal())) { logger.error("resource type:{} name:{} has exist, can't create again.", type, RegexUtils.escapeNRT(fullName)); putMsg(result, Status.RESOURCE_EXIST); } else { // query tenant Tenant tenant = tenantMapper.queryById(loginUser.getTenantId()); if (tenant != null) { String tenantCode = tenant.getTenantCode(); try { String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName); if (HadoopUtils.getInstance().exists(hdfsFilename)) { logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, RegexUtils.escapeNRT(fullName), hdfsFilename); putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename); } } catch (Exception e) { logger.error(e.getMessage(),e); putMsg(result,Status.HDFS_OPERATION_ERROR); } } else { putMsg(result,Status.TENANT_NOT_EXIST); } } return result; } /** * verify resource by full name or pid and type * @param fullName resource full name * @param id resource id * @param type resource type * @return true if the resource full name or pid not exists, otherwise return false */ @Override public Result<Object> queryResource(String fullName, Integer id, ResourceType type) { Result<Object> result = new Result<>(); if (StringUtils.isBlank(fullName) && id == null) { putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR); return result; } if (StringUtils.isNotBlank(fullName)) { List<Resource> resourceList = resourcesMapper.queryResource(fullName,type.ordinal()); if (CollectionUtils.isEmpty(resourceList)) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } putMsg(result, Status.SUCCESS); result.setData(resourceList.get(0)); } else { Resource resource = resourcesMapper.selectById(id); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } Resource parentResource = resourcesMapper.selectById(resource.getPid()); if (parentResource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } putMsg(result, Status.SUCCESS); result.setData(parentResource); } return result; } /** * view resource file online * * @param resourceId resource id * @param skipLineNum skip line number * @param limit limit * @return resource content */ @Override public Result<Object> readResource(int resourceId, int skipLineNum, int limit) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // get resource by id Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } //check preview or not by file suffix String nameSuffix = FileUtils.suffix(resource.getAlias()); String resourceViewSuffixs = FileUtils.getResourceViewSuffixs(); if (StringUtils.isNotEmpty(resourceViewSuffixs)) { List<String> strList = Arrays.asList(resourceViewSuffixs.split(",")); if (!strList.contains(nameSuffix)) { logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId); putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW); return 
result; } } String tenantCode = getTenantCode(resource.getUserId(),result); if (StringUtils.isEmpty(tenantCode)) { return result; } // hdfs path String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName()); logger.info("resource hdfs path is {}", hdfsFileName); try { if (HadoopUtils.getInstance().exists(hdfsFileName)) { List<String> content = HadoopUtils.getInstance().catFile(hdfsFileName, skipLineNum, limit); putMsg(result, Status.SUCCESS); Map<String, Object> map = new HashMap<>(); map.put(ALIAS, resource.getAlias()); map.put(CONTENT, String.join("\n", content)); result.setData(map); } else { logger.error("read file {} not exist in hdfs", hdfsFileName); putMsg(result, Status.RESOURCE_FILE_NOT_EXIST,hdfsFileName); } } catch (Exception e) { logger.error("Resource {} read failed", hdfsFileName, e); putMsg(result, Status.HDFS_OPERATION_ERROR); } return result; } /** * create resource file online * * @param loginUser login user * @param type resource type * @param fileName file name * @param fileSuffix file suffix * @param desc description * @param content content * @param pid pid * @param currentDir current directory * @return create result code */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDir) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } //check file suffix String nameSuffix = fileSuffix.trim(); String resourceViewSuffixs = FileUtils.getResourceViewSuffixs(); if (StringUtils.isNotEmpty(resourceViewSuffixs)) { List<String> strList = Arrays.asList(resourceViewSuffixs.split(",")); if (!strList.contains(nameSuffix)) { logger.error("resource suffix {} not support create", nameSuffix); putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW); return result; } } String name = fileName.trim() + "." + nameSuffix; String fullName = currentDir.equals("/") ? 
String.format("%s%s",currentDir,name) : String.format("%s/%s",currentDir,name); result = verifyResource(loginUser, type, fullName, pid); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } // save data Date now = new Date(); Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now); resourcesMapper.insert(resource); putMsg(result, Status.SUCCESS); Map<Object, Object> dataMap = new BeanMap(resource); Map<String, Object> resultMap = new HashMap<>(); for (Map.Entry<Object, Object> entry: dataMap.entrySet()) { if (!Constants.CLASS.equalsIgnoreCase(entry.getKey().toString())) { resultMap.put(entry.getKey().toString(), entry.getValue()); } } result.setData(resultMap); String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode(); result = uploadContentToHdfs(fullName, tenantCode, content); if (!result.getCode().equals(Status.SUCCESS.getCode())) { throw new ServiceException(result.getMsg()); } return result; } private Result<Object> checkResourceUploadStartupState() { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); // if resource upload startup if (!PropertyUtils.getResUploadStartupState()) { logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState()); putMsg(result, Status.HDFS_NOT_STARTUP); return result; } return result; } private Result<Object> verifyResource(User loginUser, ResourceType type, String fullName, int pid) { Result<Object> result = verifyResourceName(fullName, type, loginUser); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } return verifyPid(loginUser, pid); } private Result<Object> verifyPid(User loginUser, int pid) { Result<Object> result = new Result<>(); putMsg(result, Status.SUCCESS); if (pid != -1) { Resource parentResource = resourcesMapper.selectById(pid); if (parentResource == null) { putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST); return result; } if (!hasPerm(loginUser, parentResource.getUserId())) { putMsg(result, Status.USER_NO_OPERATION_PERM); return result; } } return result; } /** * updateProcessInstance resource * * @param resourceId resource id * @param content content * @return update result cod */ @Override @Transactional(rollbackFor = Exception.class) public Result<Object> updateResourceContent(int resourceId, String content) { Result<Object> result = checkResourceUploadStartupState(); if (!result.getCode().equals(Status.SUCCESS.getCode())) { return result; } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { logger.error("read file not exist, resource id {}", resourceId); putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } //check can edit by file suffix String nameSuffix = FileUtils.suffix(resource.getAlias()); String resourceViewSuffixs = FileUtils.getResourceViewSuffixs(); if (StringUtils.isNotEmpty(resourceViewSuffixs)) { List<String> strList = Arrays.asList(resourceViewSuffixs.split(",")); if (!strList.contains(nameSuffix)) { logger.error("resource suffix {} not support updateProcessInstance, resource id {}", nameSuffix, resourceId); putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW); return result; } } String tenantCode = getTenantCode(resource.getUserId(),result); if (StringUtils.isEmpty(tenantCode)) { return result; } resource.setSize(content.getBytes().length); resource.setUpdateTime(new Date()); resourcesMapper.updateById(resource); result = uploadContentToHdfs(resource.getFullName(), tenantCode, content); if 
(!result.getCode().equals(Status.SUCCESS.getCode())) { throw new ServiceException(result.getMsg()); } return result; } /** * @param resourceName resource name * @param tenantCode tenant code * @param content content * @return result */ private Result<Object> uploadContentToHdfs(String resourceName, String tenantCode, String content) { Result<Object> result = new Result<>(); String localFilename = ""; String hdfsFileName = ""; try { localFilename = FileUtils.getUploadFilename(tenantCode, UUID.randomUUID().toString()); if (!FileUtils.writeContent2File(content, localFilename)) { // write file fail logger.error("file {} fail, content is {}", localFilename, RegexUtils.escapeNRT(content)); putMsg(result, Status.RESOURCE_NOT_EXIST); return result; } // get resource file hdfs path hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName); String resourcePath = HadoopUtils.getHdfsResDir(tenantCode); logger.info("resource hdfs path is {}, resource dir is {}", hdfsFileName, resourcePath); HadoopUtils hadoopUtils = HadoopUtils.getInstance(); if (!hadoopUtils.exists(resourcePath)) { // create if tenant dir not exists createTenantDirIfNotExists(tenantCode); } if (hadoopUtils.exists(hdfsFileName)) { hadoopUtils.delete(hdfsFileName, false); } hadoopUtils.copyLocalToHdfs(localFilename, hdfsFileName, true, true); } catch (Exception e) { logger.error(e.getMessage(), e); result.setCode(Status.HDFS_OPERATION_ERROR.getCode()); result.setMsg(String.format("copy %s to hdfs %s fail", localFilename, hdfsFileName)); return result; } putMsg(result, Status.SUCCESS); return result; } /** * download file * * @param resourceId resource id * @return resource content * @throws IOException exception */ @Override public org.springframework.core.io.Resource downloadResource(int resourceId) throws IOException { // if resource upload startup if (!PropertyUtils.getResUploadStartupState()) { logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState()); throw new ServiceException("hdfs not startup"); } Resource resource = resourcesMapper.selectById(resourceId); if (resource == null) { logger.error("download file not exist, resource id {}", resourceId); return null; } if (resource.isDirectory()) { logger.error("resource id {} is directory,can't download it", resourceId); throw new ServiceException("can't download directory"); } int userId = resource.getUserId(); User user = userMapper.selectById(userId); if (user == null) { logger.error("user id {} not exists", userId); throw new ServiceException(String.format("resource owner id %d not exist",userId)); } Tenant tenant = tenantMapper.queryById(user.getTenantId()); if (tenant == null) { logger.error("tenant id {} not exists", user.getTenantId()); throw new ServiceException(String.format("The tenant id %d of resource owner not exist",user.getTenantId())); } String tenantCode = tenant.getTenantCode(); String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName()); String localFileName = FileUtils.getDownloadFilename(resource.getAlias()); logger.info("resource hdfs path is {}, download local filename is {}", hdfsFileName, localFileName); HadoopUtils.getInstance().copyHdfsToLocal(hdfsFileName, localFileName, false, true); return org.apache.dolphinscheduler.api.utils.FileUtils.file2Resource(localFileName); } /** * list all file * * @param loginUser login user * @param userId user id * @return unauthorized result code */ @Override public Map<String, Object> authorizeResourceTree(User loginUser, 
Integer userId) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId); List<ResourceComponent> list; if (CollectionUtils.isNotEmpty(resourceList)) { Visitor visitor = new ResourceTreeVisitor(resourceList); list = visitor.visit().getChildren(); } else { list = new ArrayList<>(0); } result.put(Constants.DATA_LIST, list); putMsg(result, Status.SUCCESS); return result; } /** * unauthorized file * * @param loginUser login user * @param userId user id * @return unauthorized result code */ @Override public Map<String, Object> unauthorizedFile(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } List<Resource> resourceList = resourcesMapper.queryResourceExceptUserId(userId); List<Resource> list; if (resourceList != null && !resourceList.isEmpty()) { Set<Resource> resourceSet = new HashSet<>(resourceList); List<Resource> authedResourceList = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM); getAuthorizedResourceList(resourceSet, authedResourceList); list = new ArrayList<>(resourceSet); } else { list = new ArrayList<>(0); } Visitor visitor = new ResourceTreeVisitor(list); result.put(Constants.DATA_LIST, visitor.visit().getChildren()); putMsg(result, Status.SUCCESS); return result; } /** * unauthorized udf function * * @param loginUser login user * @param userId user id * @return unauthorized result code */ @Override public Map<String, Object> unauthorizedUDFFunction(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); //only admin can operate if (isNotAdmin(loginUser, result)) { return result; } List<UdfFunc> udfFuncList = udfFunctionMapper.queryUdfFuncExceptUserId(userId); List<UdfFunc> resultList = new ArrayList<>(); Set<UdfFunc> udfFuncSet; if (CollectionUtils.isNotEmpty(udfFuncList)) { udfFuncSet = new HashSet<>(udfFuncList); List<UdfFunc> authedUDFFuncList = udfFunctionMapper.queryAuthedUdfFunc(userId); getAuthorizedResourceList(udfFuncSet, authedUDFFuncList); resultList = new ArrayList<>(udfFuncSet); } result.put(Constants.DATA_LIST, resultList); putMsg(result, Status.SUCCESS); return result; } /** * authorized udf function * * @param loginUser login user * @param userId user id * @return authorized result code */ @Override public Map<String, Object> authorizedUDFFunction(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } List<UdfFunc> udfFuncs = udfFunctionMapper.queryAuthedUdfFunc(userId); result.put(Constants.DATA_LIST, udfFuncs); putMsg(result, Status.SUCCESS); return result; } /** * authorized file * * @param loginUser login user * @param userId user id * @return authorized result */ @Override public Map<String, Object> authorizedFile(User loginUser, Integer userId) { Map<String, Object> result = new HashMap<>(); if (isNotAdmin(loginUser, result)) { return result; } List<Resource> authedResources = queryResourceList(userId, Constants.AUTHORIZE_WRITABLE_PERM); Visitor visitor = new ResourceTreeVisitor(authedResources); String visit = JSONUtils.toJsonString(visitor.visit(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS); logger.info(visit); String jsonTreeStr = JSONUtils.toJsonString(visitor.visit().getChildren(), SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS); logger.info(jsonTreeStr); result.put(Constants.DATA_LIST, visitor.visit().getChildren()); 
putMsg(result,Status.SUCCESS); return result; } /** * get authorized resource list * * @param resourceSet resource set * @param authedResourceList authorized resource list */ private void getAuthorizedResourceList(Set<?> resourceSet, List<?> authedResourceList) { Set<?> authedResourceSet; if (CollectionUtils.isNotEmpty(authedResourceList)) { authedResourceSet = new HashSet<>(authedResourceList); resourceSet.removeAll(authedResourceSet); } } /** * get tenantCode by UserId * * @param userId user id * @param result return result * @return tenant code */ private String getTenantCode(int userId,Result<Object> result) { User user = userMapper.selectById(userId); if (user == null) { logger.error("user {} not exists", userId); putMsg(result, Status.USER_NOT_EXIST,userId); return null; } Tenant tenant = tenantMapper.queryById(user.getTenantId()); if (tenant == null) { logger.error("tenant not exists"); putMsg(result, Status.TENANT_NOT_EXIST); return null; } return tenant.getTenantCode(); } /** * list all children id * @param resource resource * @param containSelf whether add self to children list * @return all children id */ List<Integer> listAllChildren(Resource resource,boolean containSelf) { List<Integer> childList = new ArrayList<>(); if (resource.getId() != -1 && containSelf) { childList.add(resource.getId()); } if (resource.isDirectory()) { listAllChildren(resource.getId(),childList); } return childList; } /** * list all children id * @param resourceId resource id * @param childList child list */ void listAllChildren(int resourceId,List<Integer> childList) { List<Integer> children = resourcesMapper.listChildren(resourceId); for (int childId : children) { childList.add(childId); listAllChildren(childId, childList); } } /** * query authored resource list (own and authorized) * @param loginUser login user * @param type ResourceType * @return all authored resource list */ private List<Resource> queryAuthoredResourceList(User loginUser, ResourceType type) { List<Resource> relationResources; int userId = loginUser.getId(); if (isAdmin(loginUser)) { userId = 0; relationResources = new ArrayList<>(); } else { // query resource relation relationResources = queryResourceList(userId, 0); } List<Resource> ownResourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal()); ownResourceList.addAll(relationResources); return ownResourceList; } /** * query resource list by userId and perm * @param userId userId * @param perm perm * @return resource list */ private List<Resource> queryResourceList(Integer userId, int perm) { List<Integer> resIds = resourceUserMapper.queryResourcesIdListByUserIdAndPerm(userId, perm); return CollectionUtils.isEmpty(resIds) ? new ArrayList<>() : resourcesMapper.queryResourceListById(resIds); } }
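The resource-service code above builds every resource path with the same ternary over `currentDir`. As a standalone illustration (the helper name `fullName` is hypothetical, not part of the service), the convention is: resources are addressed by a '/'-rooted full name, and joining directly under the root must not produce a double slash.

```java
// Minimal sketch of the fullName convention used by createDirectory/createResource/
// onlineCreateResource above. The class and helper names are illustrative only.
public class ResourceNameSketch {

    static String fullName(String currentDir, String name) {
        return "/".equals(currentDir)
                ? String.format("%s%s", currentDir, name)   // "/" + "a.sh"   -> "/a.sh"
                : String.format("%s/%s", currentDir, name); // "/etl" + "a.sh" -> "/etl/a.sh"
    }

    public static void main(String[] args) {
        System.out.println(fullName("/", "a.sh"));    // /a.sh
        System.out.println(fullName("/etl", "a.sh")); // /etl/a.sh
    }
}
```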
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,353
[Bug] [Master] dispatch task to worker very slowly
### Search before asking

- [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues.

### What happened

branch: dev

I find that the master dispatches tasks to workers very slowly.

mysql> select count(*),state from t_ds_task_instance group by state;
+----------+-------+
| count(*) | state |
+----------+-------+
| 15347    | 7     |
| 12       | 1     |
+----------+-------+
2 rows in set (0.01 sec)

mysql> select count(*),state from t_ds_process_instance group by state;
+----------+-------+
| count(*) | state |
+----------+-------+
| 892      | 7     |
| 12109    | 1     |
+----------+-------+

### What you expected to happen

Optimize the speed of dispatching tasks.

### How to reproduce

Create a DAG:

![image](https://user-images.githubusercontent.com/29528966/134792830-967b6674-52c8-4241-a02e-c4c23b593904.png)

Schedule the DAG with '0 * * * * ? *'.
Run the DAG with 'complement data'.
You will find that the process_instance count is 12109, but the number of tasks with state=1 is only 12. This phenomenon is not normal.

### Anything else

_No response_

### Are you willing to submit PR?

- [ ] Yes I am willing to submit a PR!

### Code of Conduct

- [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
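To see why this reproduction floods the master, note that the Quartz cron '0 * * * * ? *' fires at second 0 of every minute, and a complement run creates one process instance per fire time in the window. The sketch below is plain java.time arithmetic, not DolphinScheduler's own logic (the master derives the real fire-time list via its Quartz-based CronUtils, imported in the code below); the window dates are hypothetical, chosen to match the reported order of magnitude.

```java
// Minimal sketch: how many process instances a per-minute complement window implies.
import java.time.Duration;
import java.time.LocalDateTime;

public class ComplementWindowSketch {

    // One fire time per minute between start (inclusive) and end (exclusive).
    static long complementInstanceCount(LocalDateTime start, LocalDateTime end) {
        return Duration.between(start, end).toMinutes();
    }

    public static void main(String[] args) {
        LocalDateTime start = LocalDateTime.of(2021, 9, 1, 0, 0);  // hypothetical window
        LocalDateTime end = LocalDateTime.of(2021, 9, 9, 10, 0);
        // Prints 12120: a bit over eight days of minutes, the same order of magnitude
        // as the 12109 process instances reported above, all waiting on 12 running tasks.
        System.out.println(complementInstanceCount(start, end));
    }
}
```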
https://github.com/apache/dolphinscheduler/issues/6353
https://github.com/apache/dolphinscheduler/pull/6375
b2a2fbda1d25c4eb18cf58ceb6d062b74855cedd
67094d0f7f1803e263e73a0164ecbbd829c57129
"2021-09-26T03:49:00Z"
java
"2021-09-27T11:31:22Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/WorkflowExecuteThread.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.dolphinscheduler.server.master.runner;

import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_END_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMDPARAM_COMPLEMENT_DATA_START_DATE;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_RECOVERY_START_NODE_STRING;
import static org.apache.dolphinscheduler.common.Constants.CMD_PARAM_START_NODE_NAMES;
import static org.apache.dolphinscheduler.common.Constants.DEFAULT_WORKER_GROUP;
import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT;

import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.CommandType;
import org.apache.dolphinscheduler.common.enums.DependResult;
import org.apache.dolphinscheduler.common.enums.Direct;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
import org.apache.dolphinscheduler.common.enums.FailureStrategy;
import org.apache.dolphinscheduler.common.enums.Flag;
import org.apache.dolphinscheduler.common.enums.Priority;
import org.apache.dolphinscheduler.common.enums.StateEvent;
import org.apache.dolphinscheduler.common.enums.StateEventType;
import org.apache.dolphinscheduler.common.enums.TaskDependType;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.TimeoutFlag;
import org.apache.dolphinscheduler.common.graph.DAG;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
import org.apache.dolphinscheduler.common.thread.ThreadUtils;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.common.utils.DateUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.NetUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.Environment;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
import org.apache.dolphinscheduler.dao.entity.ProjectUser;
import org.apache.dolphinscheduler.dao.entity.Schedule;
import org.apache.dolphinscheduler.dao.entity.TaskDefinition;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
import org.apache.dolphinscheduler.remote.command.HostUpdateCommand;
import org.apache.dolphinscheduler.remote.utils.Host;
import org.apache.dolphinscheduler.server.master.config.MasterConfig;
import org.apache.dolphinscheduler.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.dolphinscheduler.server.master.runner.task.ITaskProcessor;
import org.apache.dolphinscheduler.server.master.runner.task.TaskAction;
import org.apache.dolphinscheduler.server.master.runner.task.TaskProcessorFactory;
import org.apache.dolphinscheduler.service.alert.ProcessAlertManager;
import org.apache.dolphinscheduler.service.process.ProcessService;
import org.apache.dolphinscheduler.service.quartz.cron.CronUtils;
import org.apache.dolphinscheduler.service.queue.PeerTaskInstancePriorityQueue;

import org.apache.commons.lang.StringUtils;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Lists;
import com.google.common.collect.Table;

/**
 * master exec thread, split dag
 */
public class WorkflowExecuteThread implements Runnable {

    /**
     * logger of WorkflowExecuteThread
     */
    private static final Logger logger = LoggerFactory.getLogger(WorkflowExecuteThread.class);

    /**
     * running TaskNode
     */
    private final Map<Integer, ITaskProcessor> activeTaskProcessorMaps = new ConcurrentHashMap<>();

    /**
     * task exec service
     */
    private final ExecutorService taskExecService;

    /**
     * process instance
     */
    private ProcessInstance processInstance;

    /**
     * submit failure nodes
     */
    private boolean taskFailedSubmit = false;

    /**
     * recover node id list
     */
    private List<TaskInstance> recoverNodeIdList = new ArrayList<>();

    /**
     * error task list
     */
    private Map<String, TaskInstance> errorTaskList = new ConcurrentHashMap<>();

    /**
     * complete task list
     */
    private Map<String, TaskInstance> completeTaskList = new ConcurrentHashMap<>();

    /**
     * ready to submit task queue
     */
    private PeerTaskInstancePriorityQueue readyToSubmitTaskQueue = new PeerTaskInstancePriorityQueue();

    /**
     * depend failed task map
     */
    private Map<String, TaskInstance> dependFailedTask = new ConcurrentHashMap<>();

    /**
     * forbidden task map
     */
    private Map<String, TaskNode> forbiddenTaskList = new ConcurrentHashMap<>();

    /**
     * skip task map
     */
    private Map<String, TaskNode> skipTaskNodeList = new ConcurrentHashMap<>();

    /**
     * recover tolerance fault task list
     */
    private List<TaskInstance> recoverToleranceFaultTaskList = new ArrayList<>();

    /**
     * alert manager
     */
    private ProcessAlertManager processAlertManager;

    /**
     * the object of DAG
     */
    private DAG<String, TaskNode, TaskNodeRelation> dag;

    /**
     * process service
     */
    private ProcessService processService;

    /**
     * master config
     */
    private MasterConfig masterConfig;

    /**
     *
     */
    private NettyExecutorManager nettyExecutorManager;

    private ConcurrentLinkedQueue<StateEvent> stateEvents = new ConcurrentLinkedQueue<>();

    private List<Date> complementListDate = Lists.newLinkedList();

    private Table<Integer, Long, TaskInstance> taskInstanceHashMap = HashBasedTable.create();

    private ProcessDefinition processDefinition;

    private String key;

    private ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList;

    /**
     * start flag, true: start nodes submit completely
     */
    private boolean isStart = false;

    /**
     * constructor of WorkflowExecuteThread
     *
     * @param processInstance processInstance
     * @param processService processService
     * @param nettyExecutorManager nettyExecutorManager
     * @param taskTimeoutCheckList
     */
    public WorkflowExecuteThread(ProcessInstance processInstance
            , ProcessService processService
            , NettyExecutorManager nettyExecutorManager
            , ProcessAlertManager processAlertManager
            , MasterConfig masterConfig
            , ConcurrentHashMap<Integer, TaskInstance> taskTimeoutCheckList) {
        this.processService = processService;
        this.processInstance = processInstance;
        this.masterConfig = masterConfig;
        int masterTaskExecNum = masterConfig.getMasterExecTaskNum();
        this.taskExecService = ThreadUtils.newDaemonFixedThreadExecutor("Master-Task-Exec-Thread", masterTaskExecNum);
        this.nettyExecutorManager = nettyExecutorManager;
        this.processAlertManager = processAlertManager;
        this.taskTimeoutCheckList = taskTimeoutCheckList;
    }

    @Override
    public void run() {
        try {
            startProcess();
            handleEvents();
        } catch (Exception e) {
            logger.error("handler error:", e);
        }
    }

    /**
     * the process start nodes are submitted completely.
     * @return
     */
    public boolean isStart() {
        return this.isStart;
    }

    private void handleEvents() {
        while (this.stateEvents.size() > 0) {
            try {
                StateEvent stateEvent = this.stateEvents.peek();
                if (stateEventHandler(stateEvent)) {
                    this.stateEvents.remove(stateEvent);
                }
            } catch (Exception e) {
                logger.error("state handle error:", e);
            }
        }
    }

    public String getKey() {
        if (StringUtils.isNotEmpty(key)
                || this.processDefinition == null) {
            return key;
        }
        // use %d placeholders: String.format does not understand the SLF4J-style "{}"
        key = String.format("%d_%d_%d",
                this.processDefinition.getCode(),
                this.processDefinition.getVersion(),
                this.processInstance.getId());
        return key;
    }

    public boolean addStateEvent(StateEvent stateEvent) {
        if (processInstance.getId() != stateEvent.getProcessInstanceId()) {
            logger.info("state event would be abandoned :{}", stateEvent.toString());
            return false;
        }
        this.stateEvents.add(stateEvent);
        return true;
    }

    public int eventSize() {
        return this.stateEvents.size();
    }

    public ProcessInstance getProcessInstance() {
        return this.processInstance;
    }

    private boolean stateEventHandler(StateEvent stateEvent) {
        logger.info("process event: {}", stateEvent.toString());

        if (!checkStateEvent(stateEvent)) {
            return false;
        }
        boolean result = false;
        switch (stateEvent.getType()) {
            case PROCESS_STATE_CHANGE:
                result = processStateChangeHandler(stateEvent);
                break;
            case TASK_STATE_CHANGE:
                result = taskStateChangeHandler(stateEvent);
                break;
            case PROCESS_TIMEOUT:
                result = processTimeout();
                break;
            case TASK_TIMEOUT:
                result = taskTimeout(stateEvent);
                break;
            default:
                break;
        }

        if (result) {
            this.stateEvents.remove(stateEvent);
        }
        return result;
    }

    private boolean taskTimeout(StateEvent stateEvent) {
        // nothing to do if this task instance is unknown to the workflow
        if (!taskInstanceHashMap.containsRow(stateEvent.getTaskInstanceId())) {
            return true;
        }

        TaskInstance taskInstance = taskInstanceHashMap
                .row(stateEvent.getTaskInstanceId())
                .values()
                .iterator().next();
        if (TimeoutFlag.CLOSE == taskInstance.getTaskDefine().getTimeoutFlag()) {
            return true;
        }
        TaskTimeoutStrategy taskTimeoutStrategy = taskInstance.getTaskDefine().getTimeoutNotifyStrategy();
        if (TaskTimeoutStrategy.FAILED == taskTimeoutStrategy) {
            ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
            taskProcessor.action(TaskAction.TIMEOUT);
            return false;
        } else {
            processAlertManager.sendTaskTimeoutAlert(processInstance, taskInstance, taskInstance.getTaskDefine());
            return true;
        }
    }

    private boolean processTimeout() {
        this.processAlertManager.sendProcessTimeoutAlert(this.processInstance, this.processDefinition);
        return true;
    }

    private boolean taskStateChangeHandler(StateEvent stateEvent) {
        TaskInstance task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
        if (stateEvent.getExecutionStatus().typeIsFinished()) {
            taskFinished(task);
        } else if (activeTaskProcessorMaps.containsKey(stateEvent.getTaskInstanceId())) {
            ITaskProcessor iTaskProcessor = activeTaskProcessorMaps.get(stateEvent.getTaskInstanceId());
            iTaskProcessor.run();

            if (iTaskProcessor.taskState().typeIsFinished()) {
                task = processService.findTaskInstanceById(stateEvent.getTaskInstanceId());
                taskFinished(task);
            }
        } else {
            logger.error("state handler error: {}", stateEvent.toString());
        }
        return true;
    }

    private void taskFinished(TaskInstance task) {
        logger.info("work flow {} task {} state:{} ",
                processInstance.getId(),
                task.getId(),
                task.getState());
        if (task.taskCanRetry()) {
            addTaskToStandByList(task);
            return;
        }
        ProcessInstance processInstance = processService.findProcessInstanceById(this.processInstance.getId());
        completeTaskList.put(task.getName(), task);
        activeTaskProcessorMaps.remove(task.getId());
        taskTimeoutCheckList.remove(task.getId());
        if (task.getState().typeIsSuccess()) {
            processInstance.setVarPool(task.getVarPool());
            processService.saveProcessInstance(processInstance);
            submitPostNode(task.getName());
        } else if (task.getState().typeIsFailure()) {
            if (task.isConditionsTask()
                    || DagHelper.haveConditionsAfterNode(task.getName(), dag)) {
                submitPostNode(task.getName());
            } else {
                errorTaskList.put(task.getName(), task);
                if (processInstance.getFailureStrategy() == FailureStrategy.END) {
                    killAllTasks();
                }
            }
        }
        this.updateProcessInstanceState();
    }

    private boolean checkStateEvent(StateEvent stateEvent) {
        if (this.processInstance.getId() != stateEvent.getProcessInstanceId()) {
            logger.error("mismatch process instance id: {}, state event:{}",
                    this.processInstance.getId(),
                    stateEvent.toString());
            return false;
        }
        return true;
    }

    private boolean processStateChangeHandler(StateEvent stateEvent) {
        try {
            logger.info("process:{} state {} change to {}", processInstance.getId(), processInstance.getState(), stateEvent.getExecutionStatus());
            processInstance = processService.findProcessInstanceById(this.processInstance.getId());
            if (processComplementData()) {
                return true;
            }
            if (stateEvent.getExecutionStatus().typeIsFinished()) {
                endProcess();
            }
            if (stateEvent.getExecutionStatus() == ExecutionStatus.READY_STOP) {
                killAllTasks();
            }
            return true;
        } catch (Exception e) {
            logger.error("process state change error:", e);
        }
        return true;
    }

    private boolean processComplementData() throws Exception {
        if (!needComplementProcess()) {
            return false;
        }

        Date scheduleDate = processInstance.getScheduleTime();
        if (scheduleDate == null) {
            scheduleDate = complementListDate.get(0);
        } else if (processInstance.getState().typeIsFinished()) {
            endProcess();
            int index = complementListDate.indexOf(scheduleDate);
            if (index >= complementListDate.size() - 1 || !processInstance.getState().typeIsSuccess()) {
                logger.info("process complement end. process id:{}", processInstance.getId());
                // complement data ends || no success
                return false;
            }
            logger.info("process complement continue. process id:{}, schedule time:{} complementListDate:{}",
                    processInstance.getId(),
                    processInstance.getScheduleTime(),
                    complementListDate.toString());
            scheduleDate = complementListDate.get(index + 1);
            //the next process complement
            processInstance.setId(0);
        }
        processInstance.setScheduleTime(scheduleDate);
        Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam());
        if (cmdParam.containsKey(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING)) {
            cmdParam.remove(Constants.CMD_PARAM_RECOVERY_START_NODE_STRING);
            processInstance.setCommandParam(JSONUtils.toJsonString(cmdParam));
        }

        processInstance.setState(ExecutionStatus.RUNNING_EXECUTION);
        processInstance.setGlobalParams(ParameterUtils.curingGlobalParams(
                processDefinition.getGlobalParamMap(),
                processDefinition.getGlobalParamList(),
                CommandType.COMPLEMENT_DATA, processInstance.getScheduleTime()));
        processInstance.setStartTime(new Date());
        processInstance.setEndTime(null);
        processService.saveProcessInstance(processInstance);
        this.taskInstanceHashMap.clear();
        startProcess();
        return true;
    }

    private boolean needComplementProcess() {
        if (processInstance.isComplementData()
                && Flag.NO == processInstance.getIsSubProcess()) {
            return true;
        }
        return false;
    }

    private void startProcess() throws Exception {
        if (this.taskInstanceHashMap.size() == 0) {
            isStart = false;
            buildFlowDag();
            initTaskQueue();
            submitPostNode(null);
            isStart = true;
        }
    }

    /**
     * process end handle
     */
    private void endProcess() {
        this.stateEvents.clear();
        processInstance.setEndTime(new Date());
        processService.updateProcessInstance(processInstance);
        if (processInstance.getState().typeIsWaitingThread()) {
            processService.createRecoveryWaitingThreadCommand(null, processInstance);
        }
        List<TaskInstance> taskInstances = processService.findValidTaskListByProcessId(processInstance.getId());
        ProjectUser projectUser = processService.queryProjectWithUserByProcessInstanceId(processInstance.getId());
        processAlertManager.sendAlertProcessInstance(processInstance, taskInstances, projectUser);
    }

    /**
     * generate process dag
     *
     * @throws Exception exception
     */
    private void buildFlowDag() throws Exception {
        if (this.dag != null) {
            return;
        }
        processDefinition = processService.findProcessDefinition(processInstance.getProcessDefinitionCode(),
                processInstance.getProcessDefinitionVersion());
        recoverNodeIdList = getStartTaskInstanceList(processInstance.getCommandParam());
        List<TaskNode> taskNodeList =
                processService.transformTask(processService.findRelationByCode(processDefinition.getProjectCode(), processDefinition.getCode()), Lists.newArrayList());
        forbiddenTaskList.clear();

        taskNodeList.forEach(taskNode -> {
            if (taskNode.isForbidden()) {
                forbiddenTaskList.put(taskNode.getName(), taskNode);
            }
        });

        // generate process to get DAG info
        List<String> recoveryNameList = getRecoveryNodeNameList();
        List<String> startNodeNameList = parseStartNodeName(processInstance.getCommandParam());
        ProcessDag processDag = generateFlowDag(taskNodeList,
                startNodeNameList, recoveryNameList, processInstance.getTaskDependType());
        if (processDag == null) {
            logger.error("processDag is null");
            return;
        }
        // generate process dag
        dag = DagHelper.buildDagGraph(processDag);
    }

    /**
     * init task queue
     */
    private void initTaskQueue() {
        taskFailedSubmit = false;
        activeTaskProcessorMaps.clear();
        dependFailedTask.clear();
        completeTaskList.clear();
        errorTaskList.clear();
        List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(processInstance.getId());
        for (TaskInstance task : taskInstanceList) {
            if (task.isTaskComplete()) {
                completeTaskList.put(task.getName(), task);
            }
            if (task.isConditionsTask() || DagHelper.haveConditionsAfterNode(task.getName(), dag)) {
                continue;
            }
            if (task.getState().typeIsFailure() && !task.taskCanRetry()) {
                errorTaskList.put(task.getName(), task);
            }
        }

        if (complementListDate.size() == 0 && needComplementProcess()) {
            complementListDate = processService.getComplementDateList(
                    JSONUtils.toMap(processInstance.getCommandParam()),
                    processInstance.getProcessDefinitionCode());
            logger.info(" process definition code:{} complement data: {}",
                    processInstance.getProcessDefinitionCode(), complementListDate.toString());
        }
    }

    /**
     * submit task to execute
     *
     * @param taskInstance task instance
     * @return TaskInstance
     */
    private TaskInstance submitTaskExec(TaskInstance taskInstance) {
        try {
            ITaskProcessor taskProcessor = TaskProcessorFactory.getTaskProcessor(taskInstance.getTaskType());
            if (taskInstance.getState() == ExecutionStatus.RUNNING_EXECUTION
                    && taskProcessor.getType().equalsIgnoreCase(Constants.COMMON_TASK_TYPE)) {
                notifyProcessHostUpdate(taskInstance);
            }
            boolean submit = taskProcessor.submit(taskInstance, processInstance, masterConfig.getMasterTaskCommitRetryTimes(), masterConfig.getMasterTaskCommitInterval());
            if (submit) {
                this.taskInstanceHashMap.put(taskInstance.getId(), taskInstance.getTaskCode(), taskInstance);
                activeTaskProcessorMaps.put(taskInstance.getId(), taskProcessor);
                taskProcessor.run();
                addTimeoutCheck(taskInstance);
                TaskDefinition taskDefinition = processService.findTaskDefinition(
                        taskInstance.getTaskCode(),
                        taskInstance.getTaskDefinitionVersion());
                taskInstance.setTaskDefine(taskDefinition);
                if (taskProcessor.taskState().typeIsFinished()) {
                    StateEvent stateEvent = new StateEvent();
                    stateEvent.setProcessInstanceId(this.processInstance.getId());
                    stateEvent.setTaskInstanceId(taskInstance.getId());
                    stateEvent.setExecutionStatus(taskProcessor.taskState());
                    stateEvent.setType(StateEventType.TASK_STATE_CHANGE);
                    this.stateEvents.add(stateEvent);
                }
                return taskInstance;
            } else {
                logger.error("process id:{} name:{} submit standby task id:{} name:{} failed!",
                        processInstance.getId(), processInstance.getName(),
                        taskInstance.getId(), taskInstance.getName());
                return null;
            }
        } catch (Exception e) {
            logger.error("submit standby task error", e);
            return null;
        }
    }

    private void notifyProcessHostUpdate(TaskInstance taskInstance) {
        if (StringUtils.isEmpty(taskInstance.getHost())) {
            return;
        }

        try {
            HostUpdateCommand hostUpdateCommand = new HostUpdateCommand();
            hostUpdateCommand.setProcessHost(NetUtils.getAddr(masterConfig.getListenPort()));
            hostUpdateCommand.setTaskInstanceId(taskInstance.getId());
            Host host = new Host(taskInstance.getHost());
            nettyExecutorManager.doExecute(host, hostUpdateCommand.convert2Command());
        } catch (Exception e) {
            logger.error("notify process host update", e);
        }
    }

    private void addTimeoutCheck(TaskInstance taskInstance) {
        TaskDefinition taskDefinition = processService.findTaskDefinition(
                taskInstance.getTaskCode(),
                taskInstance.getTaskDefinitionVersion()
        );
        taskInstance.setTaskDefine(taskDefinition);
        if (TimeoutFlag.OPEN == taskDefinition.getTimeoutFlag()) {
            this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance);
            return;
        }
        if (taskInstance.isDependTask() || taskInstance.isSubProcess()) {
            this.taskTimeoutCheckList.put(taskInstance.getId(), taskInstance);
        }
    }

    /**
     * find task instance in db.
     * in case submit more than one same name task in the same time.
     *
     * @param taskCode task code
     * @param taskVersion task version
     * @return TaskInstance
     */
    private TaskInstance findTaskIfExists(Long taskCode, int taskVersion) {
        List<TaskInstance> taskInstanceList = processService.findValidTaskListByProcessId(this.processInstance.getId());
        for (TaskInstance taskInstance : taskInstanceList) {
            if (taskInstance.getTaskCode() == taskCode && taskInstance.getTaskDefinitionVersion() == taskVersion) {
                return taskInstance;
            }
        }
        return null;
    }

    /**
     * encapsulation task
     *
     * @param processInstance process instance
     * @param taskNode taskNode
     * @return TaskInstance
     */
    private TaskInstance createTaskInstance(ProcessInstance processInstance, TaskNode taskNode) {
        TaskInstance taskInstance = findTaskIfExists(taskNode.getCode(), taskNode.getVersion());
        if (taskInstance == null) {
            taskInstance = new TaskInstance();
            taskInstance.setTaskCode(taskNode.getCode());
            taskInstance.setTaskDefinitionVersion(taskNode.getVersion());
            // task name
            taskInstance.setName(taskNode.getName());
            // task instance state
            taskInstance.setState(ExecutionStatus.SUBMITTED_SUCCESS);
            // process instance id
            taskInstance.setProcessInstanceId(processInstance.getId());
            // task instance type
            taskInstance.setTaskType(taskNode.getType().toUpperCase());
            // task instance whether alert
            taskInstance.setAlertFlag(Flag.NO);
            // task instance start time
            taskInstance.setStartTime(null);
            // task instance flag
            taskInstance.setFlag(Flag.YES);
            // task instance retry times
            taskInstance.setRetryTimes(0);
            // max task instance retry times
            taskInstance.setMaxRetryTimes(taskNode.getMaxRetryTimes());
            // retry task instance interval
            taskInstance.setRetryInterval(taskNode.getRetryInterval());
            //set task param
            taskInstance.setTaskParams(taskNode.getTaskParams());
            // task instance priority
            if (taskNode.getTaskInstancePriority() == null) {
                taskInstance.setTaskInstancePriority(Priority.MEDIUM);
            } else {
                taskInstance.setTaskInstancePriority(taskNode.getTaskInstancePriority());
            }

            String processWorkerGroup = processInstance.getWorkerGroup();
            processWorkerGroup = StringUtils.isBlank(processWorkerGroup) ? DEFAULT_WORKER_GROUP : processWorkerGroup;
            String taskWorkerGroup = StringUtils.isBlank(taskNode.getWorkerGroup()) ? processWorkerGroup : taskNode.getWorkerGroup();

            Long processEnvironmentCode = Objects.isNull(processInstance.getEnvironmentCode()) ? -1 : processInstance.getEnvironmentCode();
            Long taskEnvironmentCode = Objects.isNull(taskNode.getEnvironmentCode()) ? processEnvironmentCode : taskNode.getEnvironmentCode();

            if (!processWorkerGroup.equals(DEFAULT_WORKER_GROUP) && taskWorkerGroup.equals(DEFAULT_WORKER_GROUP)) {
                taskInstance.setWorkerGroup(processWorkerGroup);
                taskInstance.setEnvironmentCode(processEnvironmentCode);
            } else {
                taskInstance.setWorkerGroup(taskWorkerGroup);
                taskInstance.setEnvironmentCode(taskEnvironmentCode);
            }

            if (!taskInstance.getEnvironmentCode().equals(-1L)) {
                Environment environment = processService.findEnvironmentByCode(taskInstance.getEnvironmentCode());
                if (Objects.nonNull(environment) && StringUtils.isNotEmpty(environment.getConfig())) {
                    taskInstance.setEnvironmentConfig(environment.getConfig());
                }
            }
            // delay execution time
            taskInstance.setDelayTime(taskNode.getDelayTime());
        }

        return taskInstance;
    }

    public void getPreVarPool(TaskInstance taskInstance, Set<String> preTask) {
        Map<String, Property> allProperty = new HashMap<>();
        Map<String, TaskInstance> allTaskInstance = new HashMap<>();
        if (CollectionUtils.isNotEmpty(preTask)) {
            for (String preTaskName : preTask) {
                TaskInstance preTaskInstance = completeTaskList.get(preTaskName);
                if (preTaskInstance == null) {
                    continue;
                }
                String preVarPool = preTaskInstance.getVarPool();
                if (StringUtils.isNotEmpty(preVarPool)) {
                    List<Property> properties = JSONUtils.toList(preVarPool, Property.class);
                    for (Property info : properties) {
                        setVarPoolValue(allProperty, allTaskInstance, preTaskInstance, info);
                    }
                }
            }
            if (allProperty.size() > 0) {
                taskInstance.setVarPool(JSONUtils.toJsonString(allProperty.values()));
            }
        }
    }

    private void setVarPoolValue(Map<String, Property> allProperty, Map<String, TaskInstance> allTaskInstance, TaskInstance preTaskInstance, Property thisProperty) {
        //for this taskInstance all the param in this part is IN.
        thisProperty.setDirect(Direct.IN);
        //get the pre taskInstance Property's name
        String proName = thisProperty.getProp();
        //if the Previous nodes have the Property of same name
        if (allProperty.containsKey(proName)) {
            //comparison the value of two Property
            Property otherPro = allProperty.get(proName);
            //if this property'value of loop is empty,use the other,whether the other's value is empty or not
            if (StringUtils.isEmpty(thisProperty.getValue())) {
                allProperty.put(proName, otherPro);
                //if property'value of loop is not empty,and the other's value is not empty too, use the earlier value
            } else if (StringUtils.isNotEmpty(otherPro.getValue())) {
                TaskInstance otherTask = allTaskInstance.get(proName);
                if (otherTask.getEndTime().getTime() > preTaskInstance.getEndTime().getTime()) {
                    allProperty.put(proName, thisProperty);
                    allTaskInstance.put(proName, preTaskInstance);
                } else {
                    allProperty.put(proName, otherPro);
                }
            } else {
                allProperty.put(proName, thisProperty);
                allTaskInstance.put(proName, preTaskInstance);
            }
        } else {
            allProperty.put(proName, thisProperty);
            allTaskInstance.put(proName, preTaskInstance);
        }
    }

    private void submitPostNode(String parentNodeName) {
        Set<String> submitTaskNodeList = DagHelper.parsePostNodes(parentNodeName, skipTaskNodeList, dag, completeTaskList);
        List<TaskInstance> taskInstances = new ArrayList<>();
        for (String taskNode : submitTaskNodeList) {
            TaskNode taskNodeObject = dag.getNode(taskNode);
            if (taskInstanceHashMap.containsColumn(taskNodeObject.getCode())) {
                continue;
            }
            TaskInstance task = createTaskInstance(processInstance, taskNodeObject);
            taskInstances.add(task);
        }

        // if previous node success , post node submit
        for (TaskInstance task : taskInstances) {
            if (readyToSubmitTaskQueue.contains(task)) {
                continue;
            }
            if
(completeTaskList.containsKey(task.getName())) { logger.info("task {} has already run success", task.getName()); continue; } if (task.getState().typeIsPause() || task.getState().typeIsCancel()) { logger.info("task {} stopped, the state is {}", task.getName(), task.getState()); } else { addTaskToStandByList(task); } } submitStandByTask(); updateProcessInstanceState(); } /** * determine whether the dependencies of the task node are complete * * @return DependResult */ private DependResult isTaskDepsComplete(String taskName) { Collection<String> startNodes = dag.getBeginNode(); // if vertex,returns true directly if (startNodes.contains(taskName)) { return DependResult.SUCCESS; } TaskNode taskNode = dag.getNode(taskName); List<String> depNameList = taskNode.getDepList(); for (String depsNode : depNameList) { if (!dag.containsNode(depsNode) || forbiddenTaskList.containsKey(depsNode) || skipTaskNodeList.containsKey(depsNode)) { continue; } // dependencies must be fully completed if (!completeTaskList.containsKey(depsNode)) { return DependResult.WAITING; } ExecutionStatus depTaskState = completeTaskList.get(depsNode).getState(); if (depTaskState.typeIsPause() || depTaskState.typeIsCancel()) { return DependResult.NON_EXEC; } // ignore task state if current task is condition if (taskNode.isConditionsTask()) { continue; } if (!dependTaskSuccess(depsNode, taskName)) { return DependResult.FAILED; } } logger.info("taskName: {} completeDependTaskList: {}", taskName, Arrays.toString(completeTaskList.keySet().toArray())); return DependResult.SUCCESS; } /** * depend node is completed, but here need check the condition task branch is the next node */ private boolean dependTaskSuccess(String dependNodeName, String nextNodeName) { if (dag.getNode(dependNodeName).isConditionsTask()) { //condition task need check the branch to run List<String> nextTaskList = DagHelper.parseConditionTask(dependNodeName, skipTaskNodeList, dag, completeTaskList); if (!nextTaskList.contains(nextNodeName)) { return false; } } else { ExecutionStatus depTaskState = completeTaskList.get(dependNodeName).getState(); if (depTaskState.typeIsFailure()) { return false; } } return true; } /** * query task instance by complete state * * @param state state * @return task instance list */ private List<TaskInstance> getCompleteTaskByState(ExecutionStatus state) { List<TaskInstance> resultList = new ArrayList<>(); for (Map.Entry<String, TaskInstance> entry : completeTaskList.entrySet()) { if (entry.getValue().getState() == state) { resultList.add(entry.getValue()); } } return resultList; } /** * where there are ongoing tasks * * @param state state * @return ExecutionStatus */ private ExecutionStatus runningState(ExecutionStatus state) { if (state == ExecutionStatus.READY_STOP || state == ExecutionStatus.READY_PAUSE || state == ExecutionStatus.WAITING_THREAD || state == ExecutionStatus.DELAY_EXECUTION) { // if the running task is not completed, the state remains unchanged return state; } else { return ExecutionStatus.RUNNING_EXECUTION; } } /** * exists failure task,contains submit failure、dependency failure,execute failure(retry after) * * @return Boolean whether has failed task */ private boolean hasFailedTask() { if (this.taskFailedSubmit) { return true; } if (this.errorTaskList.size() > 0) { return true; } return this.dependFailedTask.size() > 0; } /** * process instance failure * * @return Boolean whether process instance failed */ private boolean processFailed() { if (hasFailedTask()) { if (processInstance.getFailureStrategy() == 
FailureStrategy.END) { return true; } if (processInstance.getFailureStrategy() == FailureStrategy.CONTINUE) { return readyToSubmitTaskQueue.size() == 0 || activeTaskProcessorMaps.size() == 0; } } return false; } /** * whether task for waiting thread * * @return Boolean whether has waiting thread task */ private boolean hasWaitingThreadTask() { List<TaskInstance> waitingList = getCompleteTaskByState(ExecutionStatus.WAITING_THREAD); return CollectionUtils.isNotEmpty(waitingList); } /** * prepare for pause * 1,failed retry task in the preparation queue , returns to failure directly * 2,exists pause task,complement not completed, pending submission of tasks, return to suspension * 3,success * * @return ExecutionStatus */ private ExecutionStatus processReadyPause() { if (hasRetryTaskInStandBy()) { return ExecutionStatus.FAILURE; } List<TaskInstance> pauseList = getCompleteTaskByState(ExecutionStatus.PAUSE); if (CollectionUtils.isNotEmpty(pauseList) || !isComplementEnd() || readyToSubmitTaskQueue.size() > 0) { return ExecutionStatus.PAUSE; } else { return ExecutionStatus.SUCCESS; } } /** * generate the latest process instance status by the tasks state * * @param instance * @return process instance execution status */ private ExecutionStatus getProcessInstanceState(ProcessInstance instance) { ExecutionStatus state = instance.getState(); if (activeTaskProcessorMaps.size() > 0 || hasRetryTaskInStandBy()) { // active task and retry task exists return runningState(state); } // process failure if (processFailed()) { return ExecutionStatus.FAILURE; } // waiting thread if (hasWaitingThreadTask()) { return ExecutionStatus.WAITING_THREAD; } // pause if (state == ExecutionStatus.READY_PAUSE) { return processReadyPause(); } // stop if (state == ExecutionStatus.READY_STOP) { List<TaskInstance> stopList = getCompleteTaskByState(ExecutionStatus.STOP); List<TaskInstance> killList = getCompleteTaskByState(ExecutionStatus.KILL); if (CollectionUtils.isNotEmpty(stopList) || CollectionUtils.isNotEmpty(killList) || !isComplementEnd()) { return ExecutionStatus.STOP; } else { return ExecutionStatus.SUCCESS; } } // success if (state == ExecutionStatus.RUNNING_EXECUTION) { List<TaskInstance> killTasks = getCompleteTaskByState(ExecutionStatus.KILL); if (readyToSubmitTaskQueue.size() > 0) { //tasks currently pending submission, no retries, indicating that depend is waiting to complete return ExecutionStatus.RUNNING_EXECUTION; } else if (CollectionUtils.isNotEmpty(killTasks)) { // tasks maybe killed manually return ExecutionStatus.FAILURE; } else { // if the waiting queue is empty and the status is in progress, then success return ExecutionStatus.SUCCESS; } } return state; } /** * whether complement end * * @return Boolean whether is complement end */ private boolean isComplementEnd() { if (!processInstance.isComplementData()) { return true; } try { Map<String, String> cmdParam = JSONUtils.toMap(processInstance.getCommandParam()); Date endTime = DateUtils.getScheduleDate(cmdParam.get(CMDPARAM_COMPLEMENT_DATA_END_DATE)); return processInstance.getScheduleTime().equals(endTime); } catch (Exception e) { logger.error("complement end failed ", e); return false; } } /** * updateProcessInstance process instance state * after each batch of tasks is executed, the status of the process instance is updated */ private void updateProcessInstanceState() { ProcessInstance instance = processService.findProcessInstanceById(processInstance.getId()); ExecutionStatus state = getProcessInstanceState(instance); if (processInstance.getState() != 
state) { logger.info( "work flow process instance [id: {}, name:{}], state change from {} to {}, cmd type: {}", processInstance.getId(), processInstance.getName(), processInstance.getState(), state, processInstance.getCommandType()); instance.setState(state); processService.updateProcessInstance(instance); processInstance = instance; StateEvent stateEvent = new StateEvent(); stateEvent.setExecutionStatus(processInstance.getState()); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setType(StateEventType.PROCESS_STATE_CHANGE); this.processStateChangeHandler(stateEvent); } } /** * get task dependency result * * @param taskInstance task instance * @return DependResult */ private DependResult getDependResultForTask(TaskInstance taskInstance) { return isTaskDepsComplete(taskInstance.getName()); } /** * add task to standby list * * @param taskInstance task instance */ private void addTaskToStandByList(TaskInstance taskInstance) { logger.info("add task to stand by list: {}", taskInstance.getName()); try { readyToSubmitTaskQueue.put(taskInstance); } catch (Exception e) { logger.error("add task instance to readyToSubmitTaskQueue error, taskName: {}", taskInstance.getName(), e); } } /** * remove task from stand by list * * @param taskInstance task instance */ private void removeTaskFromStandbyList(TaskInstance taskInstance) { logger.info("remove task from stand by list, id: {} name:{}", taskInstance.getId(), taskInstance.getName()); try { readyToSubmitTaskQueue.remove(taskInstance); } catch (Exception e) { logger.error("remove task instance from readyToSubmitTaskQueue error, task id:{}, Name: {}", taskInstance.getId(), taskInstance.getName(), e); } } /** * has retry task in standby * * @return Boolean whether has retry task in standby */ private boolean hasRetryTaskInStandBy() { for (Iterator<TaskInstance> iter = readyToSubmitTaskQueue.iterator(); iter.hasNext(); ) { if (iter.next().getState().typeIsFailure()) { return true; } } return false; } /** * close the on going tasks */ private void killAllTasks() { logger.info("kill called on process instance id: {}, num: {}", processInstance.getId(), activeTaskProcessorMaps.size()); for (int taskId : activeTaskProcessorMaps.keySet()) { TaskInstance taskInstance = processService.findTaskInstanceById(taskId); if (taskInstance == null || taskInstance.getState().typeIsFinished()) { continue; } ITaskProcessor taskProcessor = activeTaskProcessorMaps.get(taskId); taskProcessor.action(TaskAction.STOP); if (taskProcessor.taskState().typeIsFinished()) { StateEvent stateEvent = new StateEvent(); stateEvent.setType(StateEventType.TASK_STATE_CHANGE); stateEvent.setProcessInstanceId(this.processInstance.getId()); stateEvent.setTaskInstanceId(taskInstance.getId()); stateEvent.setExecutionStatus(taskProcessor.taskState()); this.addStateEvent(stateEvent); } } } public boolean workFlowFinish() { return this.processInstance.getState().typeIsFinished(); } /** * whether the retry interval is timed out * * @param taskInstance task instance * @return Boolean */ private boolean retryTaskIntervalOverTime(TaskInstance taskInstance) { if (taskInstance.getState() != ExecutionStatus.FAILURE) { return true; } if (taskInstance.getId() == 0 || taskInstance.getMaxRetryTimes() == 0 || taskInstance.getRetryInterval() == 0) { return true; } Date now = new Date(); long failedTimeInterval = DateUtils.differSec(now, taskInstance.getEndTime()); // task retry does not over time, return false return taskInstance.getRetryInterval() * SEC_2_MINUTES_TIME_UNIT < 
failedTimeInterval; } /** * handling the list of tasks to be submitted */ private void submitStandByTask() { try { int length = readyToSubmitTaskQueue.size(); for (int i = 0; i < length; i++) { TaskInstance task = readyToSubmitTaskQueue.peek(); if (task == null) { continue; } // stop tasks which is retrying if forced success happens if (task.taskCanRetry()) { TaskInstance retryTask = processService.findTaskInstanceById(task.getId()); if (retryTask != null && retryTask.getState().equals(ExecutionStatus.FORCED_SUCCESS)) { task.setState(retryTask.getState()); logger.info("task: {} has been forced success, put it into complete task list and stop retrying", task.getName()); removeTaskFromStandbyList(task); completeTaskList.put(task.getName(), task); submitPostNode(task.getName()); continue; } } //init varPool only this task is the first time running if (task.isFirstRun()) { //get pre task ,get all the task varPool to this task Set<String> preTask = dag.getPreviousNodes(task.getName()); getPreVarPool(task, preTask); } DependResult dependResult = getDependResultForTask(task); if (DependResult.SUCCESS == dependResult) { if (retryTaskIntervalOverTime(task)) { TaskInstance taskInstance = submitTaskExec(task); if (taskInstance == null) { this.taskFailedSubmit = true; } else { removeTaskFromStandbyList(task); } } } else if (DependResult.FAILED == dependResult) { // if the dependency fails, the current node is not submitted and the state changes to failure. dependFailedTask.put(task.getName(), task); removeTaskFromStandbyList(task); logger.info("task {},id:{} depend result : {}", task.getName(), task.getId(), dependResult); } else if (DependResult.NON_EXEC == dependResult) { // for some reasons(depend task pause/stop) this task would not be submit removeTaskFromStandbyList(task); logger.info("remove task {},id:{} , because depend result : {}", task.getName(), task.getId(), dependResult); } } } catch (Exception e) { logger.error("submit standby task error", e); } } /** * get recovery task instance * * @param taskId task id * @return recovery task instance */ private TaskInstance getRecoveryTaskInstance(String taskId) { if (!StringUtils.isNotEmpty(taskId)) { return null; } try { Integer intId = Integer.valueOf(taskId); TaskInstance task = processService.findTaskInstanceById(intId); if (task == null) { logger.error("start node id cannot be found: {}", taskId); } else { return task; } } catch (Exception e) { logger.error("get recovery task instance failed ", e); } return null; } /** * get start task instance list * * @param cmdParam command param * @return task instance list */ private List<TaskInstance> getStartTaskInstanceList(String cmdParam) { List<TaskInstance> instanceList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap != null && paramMap.containsKey(CMD_PARAM_RECOVERY_START_NODE_STRING)) { String[] idList = paramMap.get(CMD_PARAM_RECOVERY_START_NODE_STRING).split(Constants.COMMA); for (String nodeId : idList) { TaskInstance task = getRecoveryTaskInstance(nodeId); if (task != null) { instanceList.add(task); } } } return instanceList; } /** * parse "StartNodeNameList" from cmd param * * @param cmdParam command param * @return start node name list */ private List<String> parseStartNodeName(String cmdParam) { List<String> startNodeNameList = new ArrayList<>(); Map<String, String> paramMap = JSONUtils.toMap(cmdParam); if (paramMap == null) { return startNodeNameList; } if (paramMap.containsKey(CMD_PARAM_START_NODE_NAMES)) { startNodeNameList = 
Arrays.asList(paramMap.get(CMD_PARAM_START_NODE_NAMES).split(Constants.COMMA)); } return startNodeNameList; } /** * generate start node name list from parsing command param; * if "StartNodeIdList" exists in command param, return StartNodeIdList * * @return recovery node name list */ private List<String> getRecoveryNodeNameList() { List<String> recoveryNodeNameList = new ArrayList<>(); if (CollectionUtils.isNotEmpty(recoverNodeIdList)) { for (TaskInstance task : recoverNodeIdList) { recoveryNodeNameList.add(task.getName()); } } return recoveryNodeNameList; } /** * generate flow dag * * @param totalTaskNodeList total task node list * @param startNodeNameList start node name list * @param recoveryNodeNameList recovery node name list * @param depNodeType depend node type * @return ProcessDag process dag * @throws Exception exception */ public ProcessDag generateFlowDag(List<TaskNode> totalTaskNodeList, List<String> startNodeNameList, List<String> recoveryNodeNameList, TaskDependType depNodeType) throws Exception { return DagHelper.generateFlowDag(totalTaskNodeList, startNodeNameList, recoveryNodeNameList, depNodeType); } }
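As a side note on the retry logic in `retryTaskIntervalOverTime` above, here is a minimal, self-contained sketch of the interval check, assuming `SEC_2_MINUTES_TIME_UNIT` equals 60 (the retry interval is configured in minutes while the elapsed time is measured in seconds); the class and method names below are illustrative, not part of DolphinScheduler.

```java
public final class RetryIntervalSketch {

    // Returns true once the configured retry interval (in minutes) has
    // elapsed since the task failed; mirrors retryTaskIntervalOverTime above.
    static boolean retryIntervalElapsed(long retryIntervalMinutes, long failedAtMillis) {
        long elapsedSeconds = (System.currentTimeMillis() - failedAtMillis) / 1000;
        return retryIntervalMinutes * 60 < elapsedSeconds;
    }

    public static void main(String[] args) {
        long failedAt = System.currentTimeMillis() - 5 * 60 * 1000; // failed 5 minutes ago
        System.out.println(retryIntervalElapsed(1, failedAt));  // true: 1-minute interval elapsed
        System.out.println(retryIntervalElapsed(10, failedAt)); // false: still inside the interval
    }
}
```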
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,219
[Bug] [User] modifying a user's information makes DS's page show wrong information at the front-end
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened branch: dev. When I modify some users' information, DS's page shows wrong information at the front-end, which is strange. ### What you expected to happen The admin user who is logged on should not change to another user when he modifies some users' information. ### How to reproduce 1. Create two new users ![image](https://user-images.githubusercontent.com/39773194/133393258-c1549b4d-7161-4694-8e03-9e307b6a07f5.png) 2. Modify the information of one of them ![image](https://user-images.githubusercontent.com/39773194/133393204-3644ddbd-9f17-4ca3-b7da-72299b701b8e.png) 3. Click the Edit button and the problem persists. ![image](https://user-images.githubusercontent.com/39773194/133393375-bd2a3308-5d58-4152-9189-3c2ff6d20c63.png) ### Anything else _No response_ ### Are you willing to submit PR? - [x] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6219
https://github.com/apache/dolphinscheduler/pull/6341
67094d0f7f1803e263e73a0164ecbbd829c57129
cdcbe84c8a1bb1e5286568f42d2eb2ff530dbe6a
"2021-09-15T08:07:00Z"
java
"2021-09-27T15:31:06Z"
dolphinscheduler-ui/src/js/conf/home/pages/security/pages/users/_source/createUser.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <m-popover ref="popover" :ok-text="item ? $t('Edit') : $t('Submit')" @ok="_ok" @close="close"> <template slot="content"> <div class="create-user-model"> <m-list-box-f> <template slot="name"><strong>*</strong>{{$t('User Name')}}</template> <template slot="content"> <el-input type="input" v-model="userName" maxlength="60" size="small" :placeholder="$t('Please enter user name')"> </el-input> </template> </m-list-box-f> <m-list-box-f v-if="router.history.current.name !== 'account'"> <template slot="name"><strong>*</strong>{{$t('Password')}}</template> <template slot="content"> <el-input type="password" v-model="userPassword" size="small" :placeholder="$t('Please enter your password')"> </el-input> </template> </m-list-box-f> <m-list-box-f v-if="isADMIN"> <template slot="name"><strong>*</strong>{{$t('Tenant')}}</template> <template slot="content"> <el-select v-model="tenantId" style="width: 100%;" size="small"> <el-option v-for="city in tenantList" :key="city.id" :value="city.id" :label="city.code"> </el-option> </el-select> </template> </m-list-box-f> <m-list-box-f v-if="isADMIN"> <template slot="name">{{$t('Queue')}}</template> <template slot="content"> <el-select v-model="queueName" style="width: 100%;" size="small"> <el-input slot="trigger" slot-scope="{ selectedModel }" readonly :placeholder="$t('Please select a queue')" :value="selectedModel ? 
selectedModel.label : ''" @on-click-icon.stop="queueName = ''"> <em slot="suffix" class="el-icon-error" style="font-size: 15px;cursor: pointer;" v-show="queueName ==''"></em> <em slot="suffix" class="el-icon-bottom" style="font-size: 12px;" v-show="queueName!=''"></em> </el-input> <el-option v-for="city in queueList" :key="city.id" :value="city.id" :label="city.code"> </el-option> </el-select> </template> </m-list-box-f> <m-list-box-f> <template slot="name"><strong>*</strong>{{$t('Email')}}</template> <template slot="content"> <el-input type="input" v-model="email" size="small" :placeholder="$t('Please enter email')"> </el-input> </template> </m-list-box-f> <m-list-box-f> <template slot="name">{{$t('Phone')}}</template> <template slot="content"> <el-input type="input" v-model="phone" size="small" :placeholder="$t('Please enter phone number')"> </el-input> </template> </m-list-box-f> <m-list-box-f style="line-height: 38px;"> <template slot="name">{{$t('State')}}</template> <template slot="content"> <el-radio-group v-model="userState" size="small"> <el-radio :label="'1'">{{$t('Enable')}}</el-radio> <el-radio :label="'0'">{{$t('Disable')}}</el-radio> </el-radio-group> </template> </m-list-box-f> </div> </template> </m-popover> </template> <script> import _ from 'lodash' import i18n from '@/module/i18n' import store from '@/conf/home/store' import router from '@/conf/home/router' import mPopover from '@/module/components/popup/popover' import mListBoxF from '@/module/components/listBoxF/listBoxF' export default { name: 'create-user', data () { return { store, router, queueList: [], userName: '', userPassword: '', tenantId: '', queueName: '', email: '', phone: '', userState: '1', tenantList: [], // Source admin user information isADMIN: store.state.user.userInfo.userType === 'ADMIN_USER' && router.history.current.name !== 'account' } }, props: { item: Object, fromUserInfo: Boolean }, methods: { _ok () { if (this._verification()) { // The name is not verified if (this.item && this.item.groupName === this.groupName) { this._submit() return } // Verify username this.store.dispatch('security/verifyName', { type: 'user', userName: this.userName }).then(res => { this._submit() }).catch(e => { this.$message.error(e.msg || '') }) } }, _verification () { let regEmail = /^([a-zA-Z0-9]+[_|\-|\.]?)*[a-zA-Z0-9]+@([a-zA-Z0-9]+[_|\-|\.]?)*[a-zA-Z0-9]+\.[a-zA-Z]{2,}$/ // eslint-disable-line // Mobile phone number regular let regPhone = /^1(3|4|5|6|7|8)\d{9}$/; // eslint-disable-line let regPassword = /^(?![0-9]+$)(?![a-z]+$)(?![A-Z]+$)(?![`~!@#$%^&*()_\-+=<>?:"{}|,./;'\\[\]·~!@#¥%……&*()——\-+={}|《》?:“”【】、;‘’,。、]+$)[`~!@#$%^&*()_\-+=<>?:"{}|,./;'\\[\]·~!@#¥%……&*()——\-+={}|《》?:“”【】、;‘’,。、0-9A-Za-z]{6,22}$/ let userNameLength = this.userName.length // user name if (userNameLength < 3 || userNameLength > 39) { this.$message.warning(`${i18n.$t('User name length is between 3 and 39')}`) return false } if (!this.userName.replace(/\s*/g, '')) { this.$message.warning(`${i18n.$t('Please enter user name')}`) return false } // password if (this.userPassword !== '' && this.item) { if (!regPassword.test(this.userPassword)) { this.$message.warning(`${i18n.$t('Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22')}`) return false } } else if (!this.item) { if (!regPassword.test(this.userPassword)) { this.$message.warning(`${i18n.$t('Password consists of at least two combinations of numbers, letters, and characters, and the length is between 6-22')}`) return false } 
} // email if (!this.email) { this.$message.warning(`${i18n.$t('Please enter email')}`) return false } // Verify email if (!regEmail.test(this.email)) { this.$message.warning(`${i18n.$t('Please enter the correct email format')}`) return false } // Verify phone if (this.phone) { if (!regPhone.test(this.phone)) { this.$message.warning(`${i18n.$t('Please enter the correct mobile phone format')}`) return false } } return true }, _getQueueList () { return new Promise((resolve, reject) => { this.store.dispatch('security/getQueueList').then(res => { this.queueList = _.map(res, v => { return { id: v.id, code: v.queueName } }) this.$nextTick(() => { this.queueName = this.queueList[0].id }) resolve() }) }) }, _getTenantList () { return new Promise((resolve, reject) => { this.store.dispatch('security/getTenantList').then(res => { let arr = _.filter(res, (o) => { return o.id !== -1 }) this.tenantList = _.map(arr, v => { return { id: v.id, code: v.tenantCode } }) this.$nextTick(() => { if (this.tenantList.length) { this.tenantId = this.tenantList[0].id } }) resolve() }) }) }, _submit () { this.$refs.popover.spinnerLoading = true let queueCode = '' // get queue code if (this.queueName !== '') { queueCode = this.queueList.length > 0 ? _.find(this.queueList, ['id', this.queueName]).code : '' } let param = { userName: this.userName, userPassword: this.userPassword, tenantId: this.tenantId, email: this.email, queue: queueCode, phone: this.phone, state: this.userState } if (this.item) { param.id = this.item.id } this.store.dispatch(`security/${this.item ? 'updateUser' : 'createUser'}`, param).then(res => { this.$refs.popover.spinnerLoading = false this.$emit('onUpdate', param) this.$message.success(res.msg) }).catch(e => { this.$message.error(e.msg || '') this.$refs.popover.spinnerLoading = false }) }, close () { this.$emit('close') } }, watch: {}, created () { // Administrator gets tenant list if (this.isADMIN) { Promise.all([this._getQueueList(), this._getTenantList()]).then(() => { if (this.item) { this.userName = this.item.userName this.userPassword = '' this.email = this.item.email this.phone = this.item.phone this.state = this.item.state this.userState = this.item.state + '' || '1' if (this.fromUserInfo || this.item.tenantId) { this.tenantId = this.item.tenantId } this.$nextTick(() => { let queue = _.find(this.queueList, ['code', this.item.queue]) if (queue) { this.queueName = queue.id || '' } }) } }) } else { if (this.item) { this.userName = this.item.userName this.userPassword = '' this.email = this.item.email this.phone = this.item.phone this.state = this.item.state this.userState = this.state + '' || '1' if (this.fromUserInfo || this.item.tenantId) { this.tenantId = this.item.tenantId } if (this.queueList.length > 0) { let queue = _.find(this.queueList, ['code', this.item.queue]) if (queue) { this.queueName = queue.id || '' } } else { this.queueName = '' } } } }, mounted () { }, components: { mPopover, mListBoxF } } </script>
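For reference, the password check in `_verification` above boils down to a length rule plus "at least two character classes". The following is a minimal Java sketch of a simplified version of that rule; the component's actual regex also admits a much wider set of punctuation and additionally rejects all-punctuation passwords, which this sketch omits. Names here are illustrative only.

```java
import java.util.regex.Pattern;

public class PasswordRuleSketch {
    // Simplified version of the rule in createUser.vue: 6-22 characters,
    // and not composed of digits only, lowercase only, or uppercase only.
    private static final Pattern PASSWORD = Pattern.compile(
        "^(?![0-9]+$)(?![a-z]+$)(?![A-Z]+$)[0-9A-Za-z!@#$%^&*()_+=-]{6,22}$");

    public static boolean isValid(String pwd) {
        return pwd != null && PASSWORD.matcher(pwd).matches();
    }

    public static void main(String[] args) {
        System.out.println(isValid("abc123")); // true: letters + digits, length 6
        System.out.println(isValid("123456")); // false: digits only
        System.out.println(isValid("ab1"));    // false: too short
    }
}
```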
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,219
[Bug] [User] modifying a user's information makes DS's page show wrong information at the front-end
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened branch: dev. When I modify some users' information, DS's page shows wrong information at the front-end, which is strange. ### What you expected to happen The admin user who is logged on should not change to another user when he modifies some users' information. ### How to reproduce 1. Create two new users ![image](https://user-images.githubusercontent.com/39773194/133393258-c1549b4d-7161-4694-8e03-9e307b6a07f5.png) 2. Modify the information of one of them ![image](https://user-images.githubusercontent.com/39773194/133393204-3644ddbd-9f17-4ca3-b7da-72299b701b8e.png) 3. Click the Edit button and the problem persists. ![image](https://user-images.githubusercontent.com/39773194/133393375-bd2a3308-5d58-4152-9189-3c2ff6d20c63.png) ### Anything else _No response_ ### Are you willing to submit PR? - [x] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6219
https://github.com/apache/dolphinscheduler/pull/6341
67094d0f7f1803e263e73a0164ecbbd829c57129
cdcbe84c8a1bb1e5286568f42d2eb2ff530dbe6a
"2021-09-15T08:07:00Z"
java
"2021-09-27T15:31:06Z"
dolphinscheduler-ui/src/js/conf/home/pages/security/pages/users/index.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <m-list-construction :title="$t('User Manage')"> <template slot="conditions"> <m-conditions @on-conditions="_onConditions"> <template slot="button-group" v-if="userList.length"> <el-button size="mini" @click="_create('')">{{$t('Create User')}}</el-button> <el-dialog :title="item ? $t('Edit User') : $t('Create User')" v-if="createUserDialog" :visible.sync="createUserDialog" width="auto"> <m-create-user :item="item" @onUpdate="onUpdate" @close="close"></m-create-user> </el-dialog> </template> </m-conditions> </template> <template slot="content"> <template v-if="userList.length || total>0"> <m-list @on-edit="_onEdit" @on-update="_onUpdate" :user-list="userList" :page-no="searchParams.pageNo" :page-size="searchParams.pageSize"> </m-list> <div class="page-box"> <el-pagination background @current-change="_page" @size-change="_pageSize" :page-size="searchParams.pageSize" :current-page.sync="searchParams.pageNo" :page-sizes="[10, 30, 50]" layout="sizes, prev, pager, next, jumper" :total="total"> </el-pagination> </div> </template> <template v-if="!userList.length && total<=0"> <m-no-data></m-no-data> </template> <m-spin :is-spin="isLoading" :is-left="isLeft"></m-spin> </template> </m-list-construction> </template> <script> import _ from 'lodash' import { mapActions, mapMutations } from 'vuex' import mList from './_source/list' import mCreateUser from './_source/createUser' import mSpin from '@/module/components/spin/spin' import mNoData from '@/module/components/noData/noData' import listUrlParamHandle from '@/module/mixin/listUrlParamHandle' import mConditions from '@/module/components/conditions/conditions' import mListConstruction from '@/module/components/listConstruction/listConstruction' export default { name: 'users-index', data () { return { total: null, isLoading: true, userList: [], searchParams: { pageSize: 10, pageNo: 1, searchVal: '' }, isLeft: true, createUserDialog: false, item: {} } }, mixins: [listUrlParamHandle], props: {}, methods: { ...mapMutations('user', ['setUserInfo']), ...mapActions('security', ['getUsersListP']), /** * Query */ _onConditions (o) { this.searchParams = _.assign(this.searchParams, o) this.searchParams.pageNo = 1 }, _page (val) { this.searchParams.pageNo = val }, _pageSize (val) { this.searchParams.pageSize = val }, _onUpdate () { this._debounceGET() }, _onEdit (item) { this._create(item) }, _create (item) { this.item = item this.createUserDialog = true }, onUpdate (param) { this._debounceGET('false') this.setUserInfo(param) this.createUserDialog = false }, close () { this.createUserDialog = false }, _getList (flag) { if (sessionStorage.getItem('isLeft') === 0) { this.isLeft = false } else { this.isLeft = true } this.isLoading = !flag this.getUsersListP(this.searchParams).then(res 
=> { if (this.searchParams.pageNo > 1 && res.totalList.length === 0) { this.searchParams.pageNo = this.searchParams.pageNo - 1 } else { this.userList = [] this.userList = res.totalList this.total = res.total this.isLoading = false } }).catch(e => { this.isLoading = false }) } }, watch: { // router '$route' (a) { // url no params get instance list this.searchParams.pageNo = _.isEmpty(a.query) ? 1 : a.query.pageNo } }, created () { }, mounted () { }, beforeDestroy () { sessionStorage.setItem('isLeft', 1) }, components: { mList, mListConstruction, mConditions, mSpin, mNoData, mCreateUser } } </script>
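The one non-trivial piece of logic in `_getList` above is the page-number fallback: when a query for a page beyond the first returns no rows (for example, after the last user on that page was deleted), the page number steps back by one and the query is re-run. A minimal Java sketch of that step, with illustrative names:

```java
import java.util.Collections;
import java.util.List;

public class PageFallbackSketch {
    public static void main(String[] args) {
        int pageNo = 3;
        List<String> totalList = Collections.emptyList(); // the query returned no rows
        if (pageNo > 1 && totalList.isEmpty()) {
            pageNo = pageNo - 1; // step back one page and re-run the query
        }
        System.out.println(pageNo); // 2
    }
}
```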
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,367
[Bug] [Plugin TIS] Misusing PowerMockito.spy leads to UT failure
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened In the Plugin TIS test case, misusing PowerMockito.spy leads to UT failure: [https://github.com/apache/dolphinscheduler/pull/6363/checks](https://github.com/apache/dolphinscheduler/pull/6363/checks) ``` Error: Tests run: 3, Failures: 0, Errors: 3, Skipped: 0, Time elapsed: 0.786 s <<< FAILURE! - in org.apache.dolphinscheduler.plugin.task.tis.TISTaskTest Error: testInit(org.apache.dolphinscheduler.plugin.task.tis.TISTaskTest) Time elapsed: 0.531 s <<< ERROR! java.lang.IllegalArgumentException: name argument cannot be null at org.apache.dolphinscheduler.plugin.task.tis.TISTaskTest.before(TISTaskTest.java:71) Error: testGetTISConfigParams(org.apache.dolphinscheduler.plugin.task.tis.TISTaskTest) Time elapsed: 0.001 s <<< ERROR! java.lang.IllegalArgumentException: name argument cannot be null at org.apache.dolphinscheduler.plugin.task.tis.TISTaskTest.before(TISTaskTest.java:71) Error: testHandle(org.apache.dolphinscheduler.plugin.task.tis.TISTaskTest) Time elapsed: 0.001 s <<< ERROR! java.lang.IllegalArgumentException: name argument cannot be null at org.apache.dolphinscheduler.plugin.task.tis.TISTaskTest.before(TISTaskTest.java:71) ``` ### What you expected to happen Make the UT pass. ### How to reproduce To fix this bug, the PowerMockito.spy() invocation should be removed (a minimal spy sketch follows this record's file content below). ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6367
https://github.com/apache/dolphinscheduler/pull/6387
cdcbe84c8a1bb1e5286568f42d2eb2ff530dbe6a
a62a7a7216fe2021d1516b6d7370598123cced37
"2021-09-27T03:24:56Z"
java
"2021-09-28T03:38:22Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-sql/src/main/java/org/apache/dolphinscheduler/plugin/task/sql/SqlTask.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.plugin.task.sql; import org.apache.dolphinscheduler.plugin.task.api.AbstractTaskExecutor; import org.apache.dolphinscheduler.plugin.task.api.TaskException; import org.apache.dolphinscheduler.plugin.task.datasource.BaseConnectionParam; import org.apache.dolphinscheduler.plugin.task.datasource.DatasourceUtil; import org.apache.dolphinscheduler.plugin.task.util.MapUtils; import org.apache.dolphinscheduler.spi.enums.DbType; import org.apache.dolphinscheduler.spi.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.spi.task.AbstractParameters; import org.apache.dolphinscheduler.spi.task.Direct; import org.apache.dolphinscheduler.spi.task.Property; import org.apache.dolphinscheduler.spi.task.TaskAlertInfo; import org.apache.dolphinscheduler.spi.task.TaskConstants; import org.apache.dolphinscheduler.spi.task.paramparser.ParamUtils; import org.apache.dolphinscheduler.spi.task.paramparser.ParameterUtils; import org.apache.dolphinscheduler.spi.task.request.SQLTaskExecutionContext; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; import org.apache.dolphinscheduler.spi.task.request.UdfFuncRequest; import org.apache.dolphinscheduler.spi.utils.CollectionUtils; import org.apache.dolphinscheduler.spi.utils.JSONUtils; import org.apache.dolphinscheduler.spi.utils.StringUtils; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.text.MessageFormat; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import org.slf4j.Logger; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; public class SqlTask extends AbstractTaskExecutor { /** * taskExecutionContext */ private TaskRequest taskExecutionContext; /** * sql parameters */ private SqlParameters sqlParameters; /** * base datasource */ private BaseConnectionParam baseConnectionParam; /** * create function format */ private static final String CREATE_FUNCTION_FORMAT = "create temporary function {0} as ''{1}''"; /** * Abstract Yarn Task * * @param taskRequest taskRequest */ public SqlTask(TaskRequest taskRequest) { super(taskRequest); this.taskExecutionContext = taskRequest; this.sqlParameters = JSONUtils.parseObject(taskExecutionContext.getTaskParams(), SqlParameters.class); assert sqlParameters != null; if (!sqlParameters.checkParameters()) { throw new RuntimeException("sql task params is not valid"); } } @Override public AbstractParameters getParameters() { return 
sqlParameters; } @Override public void handle() throws Exception { // set the name of the current thread String threadLoggerInfoName = String.format(TaskConstants.TASK_LOG_INFO_FORMAT, taskExecutionContext.getTaskAppId()); Thread.currentThread().setName(threadLoggerInfoName); logger.info("Full sql parameters: {}", sqlParameters); logger.info("sql type : {}, datasource : {}, sql : {} , localParams : {},udfs : {},showType : {},connParams : {},varPool : {} ,query max result limit {}", sqlParameters.getType(), sqlParameters.getDatasource(), sqlParameters.getSql(), sqlParameters.getLocalParams(), sqlParameters.getUdfs(), sqlParameters.getShowType(), sqlParameters.getConnParams(), sqlParameters.getVarPool(), sqlParameters.getLimit()); try { SQLTaskExecutionContext sqlTaskExecutionContext = taskExecutionContext.getSqlTaskExecutionContext(); // get datasource baseConnectionParam = (BaseConnectionParam) DatasourceUtil.buildConnectionParams( DbType.valueOf(sqlParameters.getType()), sqlTaskExecutionContext.getConnectionParams()); // ready to execute SQL and parameter entity Map SqlBinds mainSqlBinds = getSqlAndSqlParamsMap(sqlParameters.getSql()); List<SqlBinds> preStatementSqlBinds = Optional.ofNullable(sqlParameters.getPreStatements()) .orElse(new ArrayList<>()) .stream() .map(this::getSqlAndSqlParamsMap) .collect(Collectors.toList()); List<SqlBinds> postStatementSqlBinds = Optional.ofNullable(sqlParameters.getPostStatements()) .orElse(new ArrayList<>()) .stream() .map(this::getSqlAndSqlParamsMap) .collect(Collectors.toList()); List<String> createFuncs = createFuncs(sqlTaskExecutionContext.getUdfFuncTenantCodeMap(), logger); // execute sql task executeFuncAndSql(mainSqlBinds, preStatementSqlBinds, postStatementSqlBinds, createFuncs); setExitStatusCode(TaskConstants.EXIT_CODE_SUCCESS); } catch (Exception e) { setExitStatusCode(TaskConstants.EXIT_CODE_FAILURE); logger.error("sql task error: {}", e.toString()); throw e; } } /** * execute function and sql * * @param mainSqlBinds main sql binds * @param preStatementsBinds pre statements binds * @param postStatementsBinds post statements binds * @param createFuncs create functions */ public void executeFuncAndSql(SqlBinds mainSqlBinds, List<SqlBinds> preStatementsBinds, List<SqlBinds> postStatementsBinds, List<String> createFuncs) throws Exception { Connection connection = null; PreparedStatement stmt = null; ResultSet resultSet = null; try { // create connection connection = DatasourceUtil.getConnection(DbType.valueOf(sqlParameters.getType()), baseConnectionParam); // create temp function if (CollectionUtils.isNotEmpty(createFuncs)) { createTempFunction(connection, createFuncs); } // pre sql preSql(connection, preStatementsBinds); stmt = prepareStatementAndBind(connection, mainSqlBinds); String result = null; // decide whether to executeQuery or executeUpdate based on sqlType if (sqlParameters.getSqlType() == SqlType.QUERY.ordinal()) { // query statements need to be convert to JsonArray and inserted into Alert to send resultSet = stmt.executeQuery(); result = resultProcess(resultSet); } else if (sqlParameters.getSqlType() == SqlType.NON_QUERY.ordinal()) { // non query statement String updateResult = String.valueOf(stmt.executeUpdate()); result = setNonQuerySqlReturn(updateResult, sqlParameters.getLocalParams()); } //deal out params sqlParameters.dealOutParam(result); postSql(connection, postStatementsBinds); } catch (Exception e) { logger.error("execute sql error: {}", e.getMessage()); throw e; } finally { close(resultSet, stmt, connection); } } private 
String setNonQuerySqlReturn(String updateResult, List<Property> properties) { String result = null; for (Property info : properties) { if (Direct.OUT == info.getDirect()) { List<Map<String, String>> updateRL = new ArrayList<>(); Map<String, String> updateRM = new HashMap<>(); updateRM.put(info.getProp(), updateResult); updateRL.add(updateRM); result = JSONUtils.toJsonString(updateRL); break; } } return result; } /** * result process * * @param resultSet resultSet * @throws Exception Exception */ private String resultProcess(ResultSet resultSet) throws Exception { ArrayNode resultJSONArray = JSONUtils.createArrayNode(); if (resultSet != null) { ResultSetMetaData md = resultSet.getMetaData(); int num = md.getColumnCount(); int rowCount = 0; while (rowCount < sqlParameters.getLimit() && resultSet.next()) { ObjectNode mapOfColValues = JSONUtils.createObjectNode(); for (int i = 1; i <= num; i++) { mapOfColValues.set(md.getColumnLabel(i), JSONUtils.toJsonNode(resultSet.getObject(i))); } resultJSONArray.add(mapOfColValues); rowCount++; } int displayRows = sqlParameters.getDisplayRows() > 0 ? sqlParameters.getDisplayRows() : TaskConstants.DEFAULT_DISPLAY_ROWS; displayRows = Math.min(displayRows, resultJSONArray.size()); logger.info("display sql result {} rows as follows:", displayRows); for (int i = 0; i < displayRows; i++) { String row = JSONUtils.toJsonString(resultJSONArray.get(i)); logger.info("row {} : {}", i + 1, row); } } String result = JSONUtils.toJsonString(resultJSONArray); if (sqlParameters.getSendEmail() == null || sqlParameters.getSendEmail()) { sendAttachment(sqlParameters.getGroupId(), StringUtils.isNotEmpty(sqlParameters.getTitle()) ? sqlParameters.getTitle() : taskExecutionContext.getTaskName() + " query result sets", result); } logger.debug("execute sql result : {}", result); return result; } /** * send alert as an attachment * * @param title title * @param content content */ private void sendAttachment(int groupId, String title, String content) { setNeedAlert(Boolean.TRUE); TaskAlertInfo taskAlertInfo = new TaskAlertInfo(); taskAlertInfo.setAlertGroupId(groupId); taskAlertInfo.setContent(content); taskAlertInfo.setTitle(title); } /** * pre sql * * @param connection connection * @param preStatementsBinds preStatementsBinds */ private void preSql(Connection connection, List<SqlBinds> preStatementsBinds) throws Exception { for (SqlBinds sqlBind : preStatementsBinds) { try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) { int result = pstmt.executeUpdate(); logger.info("pre statement execute result: {}, for sql: {}", result, sqlBind.getSql()); } } } /** * post sql * * @param connection connection * @param postStatementsBinds postStatementsBinds */ private void postSql(Connection connection, List<SqlBinds> postStatementsBinds) throws Exception { for (SqlBinds sqlBind : postStatementsBinds) { try (PreparedStatement pstmt = prepareStatementAndBind(connection, sqlBind)) { int result = pstmt.executeUpdate(); logger.info("post statement execute result: {},for sql: {}", result, sqlBind.getSql()); } } } /** * create temp function * * @param connection connection * @param createFuncs createFuncs */ private void createTempFunction(Connection connection, List<String> createFuncs) throws Exception { try (Statement funcStmt = connection.createStatement()) { for (String createFunc : createFuncs) { logger.info("hive create function sql: {}", createFunc); funcStmt.execute(createFunc); } } } /** * close jdbc resource * * @param resultSet resultSet * @param pstmt pstmt * 
@param connection connection */ private void close(ResultSet resultSet, PreparedStatement pstmt, Connection connection) { if (resultSet != null) { try { resultSet.close(); } catch (SQLException e) { logger.error("close result set error : {}", e.getMessage(), e); } } if (pstmt != null) { try { pstmt.close(); } catch (SQLException e) { logger.error("close prepared statement error : {}", e.getMessage(), e); } } if (connection != null) { try { connection.close(); } catch (SQLException e) { logger.error("close connection error : {}", e.getMessage(), e); } } } /** * preparedStatement bind * * @param connection connection * @param sqlBinds sqlBinds * @return PreparedStatement * @throws Exception Exception */ private PreparedStatement prepareStatementAndBind(Connection connection, SqlBinds sqlBinds) { // is the timeout set boolean timeoutFlag = taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.FAILED || taskExecutionContext.getTaskTimeoutStrategy() == TaskTimeoutStrategy.WARNFAILED; try { PreparedStatement stmt = connection.prepareStatement(sqlBinds.getSql()); if (timeoutFlag) { stmt.setQueryTimeout(taskExecutionContext.getTaskTimeout()); } Map<Integer, Property> params = sqlBinds.getParamsMap(); if (params != null) { for (Map.Entry<Integer, Property> entry : params.entrySet()) { Property prop = entry.getValue(); ParameterUtils.setInParameter(entry.getKey(), stmt, prop.getType(), prop.getValue()); } } logger.info("prepare statement replace sql : {} ", stmt); return stmt; } catch (Exception exception) { throw new TaskException("SQL task prepareStatementAndBind error", exception); } } /** * regular expressions match the contents between two specified strings * * @param content content * @param rgex rgex * @param sqlParamsMap sql params map * @param paramsPropsMap params props map */ public void setSqlParamsMap(String content, String rgex, Map<Integer, Property> sqlParamsMap, Map<String, Property> paramsPropsMap) { Pattern pattern = Pattern.compile(rgex); Matcher m = pattern.matcher(content); int index = 1; while (m.find()) { String paramName = m.group(1); Property prop = paramsPropsMap.get(paramName); if (prop == null) { logger.error("setSqlParamsMap: No Property with paramName: {} is found in paramsPropsMap of task instance" + " with id: {}. 
So couldn't put Property in sqlParamsMap.", paramName, taskExecutionContext.getTaskInstanceId()); } else { sqlParamsMap.put(index, prop); index++; logger.info("setSqlParamsMap: Property with paramName: {} put in sqlParamsMap of content {} successfully.", paramName, content); } } } /** * print replace sql * * @param content content * @param formatSql format sql * @param rgex rgex * @param sqlParamsMap sql params map */ private void printReplacedSql(String content, String formatSql, String rgex, Map<Integer, Property> sqlParamsMap) { //parameter print style logger.info("after replace sql , preparing : {}", formatSql); StringBuilder logPrint = new StringBuilder("replaced sql , parameters:"); if (sqlParamsMap == null) { logger.info("printReplacedSql: sqlParamsMap is null."); } else { for (int i = 1; i <= sqlParamsMap.size(); i++) { logPrint.append(sqlParamsMap.get(i).getValue()).append("(").append(sqlParamsMap.get(i).getType()).append(")"); } } logger.info("Sql Params are {}", logPrint); } /** * ready to execute SQL and parameter entity Map * * @return SqlBinds */ private SqlBinds getSqlAndSqlParamsMap(String sql) { Map<Integer, Property> sqlParamsMap = new HashMap<>(); StringBuilder sqlBuilder = new StringBuilder(); // combining local and global parameters Map<String, Property> paramsMap = ParamUtils.convert(taskExecutionContext, getParameters()); // spell SQL according to the final user-defined variable if (paramsMap == null) { sqlBuilder.append(sql); return new SqlBinds(sqlBuilder.toString(), sqlParamsMap); } if (StringUtils.isNotEmpty(sqlParameters.getTitle())) { String title = ParameterUtils.convertParameterPlaceholders(sqlParameters.getTitle(), ParamUtils.convert(paramsMap)); logger.info("SQL title : {}", title); sqlParameters.setTitle(title); } //new //replace variable TIME with $[YYYYmmddd...] 
in sql when history run job and batch complement job sql = ParameterUtils.replaceScheduleTime(sql, taskExecutionContext.getScheduleTime()); // special characters need to be escaped, ${} needs to be escaped String rgex = "['\"]*\\$\\{(.*?)\\}['\"]*"; setSqlParamsMap(sql, rgex, sqlParamsMap, paramsMap); //Replace the original value in sql !{...} ,Does not participate in precompilation String rgexo = "['\"]*\\!\\{(.*?)\\}['\"]*"; sql = replaceOriginalValue(sql, rgexo, paramsMap); // replace the ${} of the SQL statement with the Placeholder String formatSql = sql.replaceAll(rgex, "?"); sqlBuilder.append(formatSql); // print repalce sql printReplacedSql(sql, formatSql, rgex, sqlParamsMap); return new SqlBinds(sqlBuilder.toString(), sqlParamsMap); } private String replaceOriginalValue(String content, String rgex, Map<String, Property> sqlParamsMap) { Pattern pattern = Pattern.compile(rgex); while (true) { Matcher m = pattern.matcher(content); if (!m.find()) { break; } String paramName = m.group(1); String paramValue = sqlParamsMap.get(paramName).getValue(); content = m.replaceFirst(paramValue); } return content; } /** * create function list * * @param udfFuncTenantCodeMap key is udf function,value is tenant code * @param logger logger * @return create function list */ public static List<String> createFuncs(Map<UdfFuncRequest, String> udfFuncTenantCodeMap, Logger logger) { if (MapUtils.isEmpty(udfFuncTenantCodeMap)) { logger.info("can't find udf function resource"); return null; } List<String> funcList = new ArrayList<>(); // build temp function sql buildTempFuncSql(funcList, new ArrayList<>(udfFuncTenantCodeMap.keySet())); return funcList; } /** * build temp function sql * * @param sqls sql list * @param udfFuncRequests udf function list */ private static void buildTempFuncSql(List<String> sqls, List<UdfFuncRequest> udfFuncRequests) { if (CollectionUtils.isNotEmpty(udfFuncRequests)) { for (UdfFuncRequest udfFuncRequest : udfFuncRequests) { sqls.add(MessageFormat .format(CREATE_FUNCTION_FORMAT, udfFuncRequest.getFuncName(), udfFuncRequest.getClassName())); } } } }
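As promised in the issue body above: a spy wraps a real, fully-constructed instance and delegates to it unless a method is stubbed, so spying an object whose construction-time state is not yet in place can fail the way the TIS test did; per the issue, the fix was simply to drop the PowerMockito.spy() call. The sketch below is illustrative only, uses plain Mockito rather than DolphinScheduler code, and assumes mockito-core is on the classpath.

```java
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

import java.util.ArrayList;
import java.util.List;

public class SpySketch {
    public static void main(String[] args) {
        List<String> real = new ArrayList<>();    // a real, fully-constructed object
        List<String> spyList = spy(real);         // the spy delegates to the real object
        doReturn(100).when(spyList).size();       // stub one method, keep the rest real
        spyList.add("a");
        System.out.println(spyList.size());       // 100 (stubbed)
        System.out.println(spyList.get(0));       // "a" (real behavior)
    }
}
```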
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,393
[Bug] [GA] Missing issue action translate-on-issue
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When I create a new issue, such as #6390, I receive a mailing-list message saying my issue-robot jobs failed. I wonder what jobs we run after an issue is created, and why they always fail; related discussion: https://github.com/apache/dolphinscheduler/discussions/6391 ### What you expected to happen The issue bot should not fail when a new issue is created. ### How to reproduce Create a new issue. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6393
https://github.com/apache/dolphinscheduler/pull/6394
c013b49e728d942bf22574f1b59d4c5a9e69a9f3
c05e407b15fd7d64fb57471e39c6647952721d4c
"2021-09-28T07:52:51Z"
java
"2021-09-28T13:06:50Z"
.github/actions/translate-on-issue
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,393
[Bug] [GA] Missing issue action translate-on-issue
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When I create a new issue, such as #6390, I receive a mailing-list notification saying my issue-robot jobs failed. I wonder which jobs we run after an issue is created, and why they always fail; related discussion: https://github.com/apache/dolphinscheduler/discussions/6391 ### What you expected to happen The issue bot should not fail when a new issue is created. ### How to reproduce Create a new issue. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6393
https://github.com/apache/dolphinscheduler/pull/6394
c013b49e728d942bf22574f1b59d4c5a9e69a9f3
c05e407b15fd7d64fb57471e39c6647952721d4c
"2021-09-28T07:52:51Z"
java
"2021-09-28T13:06:50Z"
.gitmodules
# Licensed to Apache Software Foundation (ASF) under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Apache Software Foundation (ASF) licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. [submodule ".github/actions/comment-on-issue"] path = .github/actions/comment-on-issue url = https://github.com/xingchun-chen/actions-comment-on-issue [submodule ".github/actions/lable-on-issue"] path = .github/actions/lable-on-issue url = https://github.com/xingchun-chen/labeler [submodule ".github/actions/translate-on-issue"] path = .github/actions/translate-on-issue url = https://github.com/xingchun-chen/translation-helper.git [submodule ".github/actions/reviewdog-setup"] path = .github/actions/reviewdog-setup url = https://github.com/reviewdog/action-setup
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,298
[Bug] [Server] Task plugin dir should be included in install and config file
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If we follow the doc to deploy a cluster, we will find that the worker cannot find the task plugins, because the task plugin dir is empty by default. ### What you expected to happen The related config should be added to the install file. ### How to reproduce Just follow the doc to deploy a DS cluster. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6298
https://github.com/apache/dolphinscheduler/pull/6299
f5dde9fb5e59adf61e6283951c99569ff35d4141
d184ebda1e7fff1250858386debb7a2a0435bb78
"2021-09-23T01:55:42Z"
java
"2021-09-29T03:36:51Z"
dolphinscheduler-server/src/main/resources/config/install_config.conf
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # NOTICE: If the following config has special characters in the variable `.*[]^${}\+?|()@#&`, Please escape, for example, `[` escape to `\[` # postgresql or mysql dbtype="mysql" # db config # db address and port dbhost="192.168.xx.xx:3306" # db username username="xx" # db password # NOTICE: if there are special characters, please use the \ to escape, for example, `[` escape to `\[` password="xx" # database name dbname="dolphinscheduler" # zk cluster zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181" # zk root directory zkRoot="/dolphinscheduler" # registry config # registry plugin dir # Note: find and load the Registry Plugin Jar from this dir. registryPluginDir="/data1_1T/dolphinscheduler/lib/plugin/registry" registryPluginName="zookeeper" registryServers="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181" # Note: the target installation path for dolphinscheduler, please not config as the same as the current path (pwd) installPath="/data1_1T/dolphinscheduler" # deployment user # Note: the deployment user needs to have sudo privileges and permissions to operate hdfs. If hdfs is enabled, the root directory needs to be created by itself deployUser="dolphinscheduler" # alert config # alert plugin dir # Note: find and load the Alert Plugin Jar from this dir. alertPluginDir="/data1_1T/dolphinscheduler/lib/plugin/alert" # user data local directory path, please make sure the directory exists and have read write permissions dataBasedirPath="/tmp/dolphinscheduler" # resource storage type: HDFS, S3, NONE resourceStorageType="NONE" # resource store on HDFS/S3 path, resource file will store to this hadoop hdfs path, self configuration, please make sure the directory exists on hdfs and have read write permissions. "/dolphinscheduler" is recommended resourceUploadPath="/dolphinscheduler" # if resourceStorageType is HDFS,defaultFS write namenode address,HA you need to put core-site.xml and hdfs-site.xml in the conf directory. 
# if S3,write S3 address,HA,for example :s3a://dolphinscheduler, # Note,s3 be sure to create the root directory /dolphinscheduler defaultFS="hdfs://mycluster:8020" # if resourceStorageType is S3, the following three configuration is required, otherwise please ignore s3Endpoint="http://192.168.xx.xx:9010" s3AccessKey="xxxxxxxxxx" s3SecretKey="xxxxxxxxxx" # resourcemanager port, the default value is 8088 if not specified resourceManagerHttpAddressPort="8088" # if resourcemanager HA is enabled, please set the HA IPs; if resourcemanager is single, keep this value empty yarnHaIps="192.168.xx.xx,192.168.xx.xx" # if resourcemanager HA is enabled or not use resourcemanager, please keep the default value; If resourcemanager is single, you only need to replace ds1 to actual resourcemanager hostname singleYarnIp="yarnIp1" # who have permissions to create directory under HDFS/S3 root path # Note: if kerberos is enabled, please config hdfsRootUser= hdfsRootUser="hdfs" # kerberos config # whether kerberos starts, if kerberos starts, following four items need to config, otherwise please ignore kerberosStartUp="false" # kdc krb5 config file path krb5ConfPath="$installPath/conf/krb5.conf" # keytab username,watch out the @ sign should followd by \\ keytabUserName="hdfs-mycluster\\@ESZ.COM" # username keytab path keytabPath="$installPath/conf/hdfs.headless.keytab" # kerberos expire time, the unit is hour kerberosExpireTime="2" # use sudo or not sudoEnable="true" # worker tenant auto create workerTenantAutoCreate="false" # api server port apiServerPort="12345" # install hosts # Note: install the scheduled hostname list. If it is pseudo-distributed, just write a pseudo-distributed hostname ips="ds1,ds2,ds3,ds4,ds5" # ssh port, default 22 # Note: if ssh port is not default, modify here sshPort="22" # run master machine # Note: list of hosts hostname for deploying master masters="ds1,ds2" # run worker machine # note: need to write the worker group name of each worker, the default value is "default" workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default" # run alert machine # note: list of machine hostnames for deploying alert server alertServer="ds3" # run api machine # note: list of machine hostnames for deploying api server apiServers="ds1"
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,298
[Bug] [Server] Task plugin dir should be included in install and config file
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened If we follow the doc to deploy a cluster, we will find that the worker cannot find the task plugins, because the task plugin dir is empty by default. ### What you expected to happen The related config should be added to the install file. ### How to reproduce Just follow the doc to deploy a DS cluster. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6298
https://github.com/apache/dolphinscheduler/pull/6299
f5dde9fb5e59adf61e6283951c99569ff35d4141
d184ebda1e7fff1250858386debb7a2a0435bb78
"2021-09-23T01:55:42Z"
java
"2021-09-29T03:36:51Z"
install.sh
#!/bin/sh # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # workDir=`dirname $0` workDir=`cd ${workDir};pwd` source ${workDir}/conf/config/install_config.conf # 1.replace file echo "1.replace file" txt="" if [[ "$OSTYPE" == "darwin"* ]]; then # Mac OSX txt="''" fi datasourceDriverClassname="com.mysql.jdbc.Driver" if [ $dbtype == "postgresql" ];then datasourceDriverClassname="org.postgresql.Driver" fi sed -i ${txt} "s@^spring.datasource.driver-class-name=.*@spring.datasource.driver-class-name=${datasourceDriverClassname}@g" conf/datasource.properties sed -i ${txt} "s@^spring.datasource.url=.*@spring.datasource.url=jdbc:${dbtype}://${dbhost}/${dbname}?characterEncoding=UTF-8\&allowMultiQueries=true@g" conf/datasource.properties sed -i ${txt} "s@^spring.datasource.username=.*@spring.datasource.username=${username}@g" conf/datasource.properties sed -i ${txt} "s@^spring.datasource.password=.*@spring.datasource.password=${password}@g" conf/datasource.properties sed -i ${txt} "s@^data.basedir.path=.*@data.basedir.path=${dataBasedirPath}@g" conf/common.properties sed -i ${txt} "s@^resource.storage.type=.*@resource.storage.type=${resourceStorageType}@g" conf/common.properties sed -i ${txt} "s@^resource.upload.path=.*@resource.upload.path=${resourceUploadPath}@g" conf/common.properties sed -i ${txt} "s@^hadoop.security.authentication.startup.state=.*@hadoop.security.authentication.startup.state=${kerberosStartUp}@g" conf/common.properties sed -i ${txt} "s@^java.security.krb5.conf.path=.*@java.security.krb5.conf.path=${krb5ConfPath}@g" conf/common.properties sed -i ${txt} "s@^login.user.keytab.username=.*@login.user.keytab.username=${keytabUserName}@g" conf/common.properties sed -i ${txt} "s@^login.user.keytab.path=.*@login.user.keytab.path=${keytabPath}@g" conf/common.properties sed -i ${txt} "s@^kerberos.expire.time=.*@kerberos.expire.time=${kerberosExpireTime}@g" conf/common.properties sed -i ${txt} "s@^hdfs.root.user=.*@hdfs.root.user=${hdfsRootUser}@g" conf/common.properties sed -i ${txt} "s@^fs.defaultFS=.*@fs.defaultFS=${defaultFS}@g" conf/common.properties sed -i ${txt} "s@^fs.s3a.endpoint=.*@fs.s3a.endpoint=${s3Endpoint}@g" conf/common.properties sed -i ${txt} "s@^fs.s3a.access.key=.*@fs.s3a.access.key=${s3AccessKey}@g" conf/common.properties sed -i ${txt} "s@^fs.s3a.secret.key=.*@fs.s3a.secret.key=${s3SecretKey}@g" conf/common.properties sed -i ${txt} "s@^resource.manager.httpaddress.port=.*@resource.manager.httpaddress.port=${resourceManagerHttpAddressPort}@g" conf/common.properties sed -i ${txt} "s@^yarn.resourcemanager.ha.rm.ids=.*@yarn.resourcemanager.ha.rm.ids=${yarnHaIps}@g" conf/common.properties sed -i ${txt} "s@^yarn.application.status.address=.*@yarn.application.status.address=http://${singleYarnIp}:%s/ws/v1/cluster/apps/%s@g" conf/common.properties sed -i ${txt} 
"s@^yarn.job.history.status.address=.*@yarn.job.history.status.address=http://${singleYarnIp}:19888/ws/v1/history/mapreduce/jobs/%s@g" conf/common.properties sed -i ${txt} "s@^sudo.enable=.*@sudo.enable=${sudoEnable}@g" conf/common.properties # the following configurations may be commented, so ddd #\? to ensure successful sed sed -i ${txt} "s@^#\?worker.tenant.auto.create=.*@worker.tenant.auto.create=${workerTenantAutoCreate}@g" conf/worker.properties sed -i ${txt} "s@^#\?alert.listen.host=.*@alert.listen.host=${alertServer}@g" conf/worker.properties sed -i ${txt} "s@^#\?alert.plugin.dir=.*@alert.plugin.dir=${alertPluginDir}@g" conf/alert.properties sed -i ${txt} "s@^#\?server.port=.*@server.port=${apiServerPort}@g" conf/application-api.properties sed -i ${txt} "s@^#\?registry.plugin.dir=.*@registry.plugin.dir=${registryPluginDir}@g" conf/registry.properties sed -i ${txt} "s@^#\?registry.plugin.name=.*@registry.plugin.name=${registryPluginName}@g" conf/registry.properties sed -i ${txt} "s@^#\?registry.servers=.*@registry.servers=${registryServers}@g" conf/registry.properties # 2.create directory echo "2.create directory" if [ ! -d $installPath ];then sudo mkdir -p $installPath sudo chown -R $deployUser:$deployUser $installPath fi # 3.scp resources echo "3.scp resources" sh ${workDir}/script/scp-hosts.sh if [ $? -eq 0 ] then echo 'scp copy completed' else echo 'scp copy failed to exit' exit 1 fi # 4.stop server echo "4.stop server" sh ${workDir}/script/stop-all.sh # 5.delete zk node echo "5.delete zk node" sh ${workDir}/script/remove-zk-node.sh $zkRoot # 6.startup echo "6.startup" sh ${workDir}/script/start-all.sh
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,412
[BUG][Task-API]load codec class fail
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar issues. ### What happened When using codec-related classes, a NoClassDefFoundError is thrown because the related packages are excluded:
[ERROR] 2021-09-29 12:18:56.119 org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread:[216] - task scheduler failure
java.lang.NoClassDefFoundError: org/apache/commons/codec/binary/Base64
    at org.apache.dolphinscheduler.plugin.task.datasource.PasswordUtils.<clinit>(PasswordUtils.java:38)
    at org.apache.dolphinscheduler.plugin.task.datasource.mysql.MysqlDatasourceProcessor.getConnection(MysqlDatasourceProcessor.java:126)
    at org.apache.dolphinscheduler.plugin.task.datasource.DatasourceUtil.getConnection(DatasourceUtil.java:59)
    at org.apache.dolphinscheduler.plugin.task.sql.SqlTask.executeFuncAndSql(SqlTask.java:181)
    at org.apache.dolphinscheduler.plugin.task.sql.SqlTask.handle(SqlTask.java:152)
    at org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread.run(TaskExecuteThread.java:202)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.ClassNotFoundException: org.apache.commons.codec.binary.Base64
    at java.net.URLClassLoader.findClass(URLClassLoader.java:381)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
    at org.apache.dolphinscheduler.spi.plugin.DolphinPluginClassLoader.loadClass(DolphinPluginClassLoader.java:81)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
    ... 11 common frames omitted
### What you expected to happen The task runs normally. ### How to reproduce Run a SQL task whose data source uses MySQL or another type that requires the password encoder. ### Anything else _No response_ ### Are you willing to submit PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6412
https://github.com/apache/dolphinscheduler/pull/6411
ea6503b99742ab72d99527edb3abdad398bee83e
1843e986f5af15e92f80fa756cfe229dcc2b8c1e
"2021-09-29T06:36:45Z"
java
"2021-09-29T08:03:06Z"
dolphinscheduler-task-plugin/dolphinscheduler-task-api/pom.xml
<?xml version="1.0" encoding="UTF-8"?> <!-- ~ Licensed to the Apache Software Foundation (ASF) under one or more ~ contributor license agreements. See the NOTICE file distributed with ~ this work for additional information regarding copyright ownership. ~ The ASF licenses this file to You under the Apache License, Version 2.0 ~ (the "License"); you may not use this file except in compliance with ~ the License. You may obtain a copy of the License at ~ ~ http://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ~ See the License for the specific language governing permissions and ~ limitations under the License. --> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <parent> <artifactId>dolphinscheduler-task-plugin</artifactId> <groupId>org.apache.dolphinscheduler</groupId> <version>2.0.0-SNAPSHOT</version> </parent> <modelVersion>4.0.0</modelVersion> <packaging>jar</packaging> <artifactId>dolphinscheduler-task-api</artifactId> <dependencies> <dependency> <groupId>org.apache.dolphinscheduler</groupId> <artifactId>dolphinscheduler-spi</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>commons-io</groupId> <artifactId>commons-io</artifactId> </dependency> <dependency> <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> </dependency> <dependency> <groupId>ch.qos.logback</groupId> <artifactId>logback-classic</artifactId> </dependency> <dependency> <groupId>org.slf4j</groupId> <artifactId>slf4j-api</artifactId> <scope>provided</scope> </dependency> <dependency> <groupId>org.apache.hive</groupId> <artifactId>hive-jdbc</artifactId> <scope>provided</scope> <exclusions> <exclusion> <artifactId>slf4j-log4j12</artifactId> <groupId>org.slf4j</groupId> </exclusion> <exclusion> <groupId>org.eclipse.jetty.aggregate</groupId> <artifactId>jetty-all</artifactId> </exclusion> <exclusion> <groupId>org.apache.ant</groupId> <artifactId>ant</artifactId> </exclusion> <exclusion> <groupId>io.dropwizard.metrics</groupId> <artifactId>metrics-json</artifactId> </exclusion> <exclusion> <groupId>io.dropwizard.metrics</groupId> <artifactId>metrics-jvm</artifactId> </exclusion> <exclusion> <groupId>com.github.joshelser</groupId> <artifactId>dropwizard-metrics-hadoop-metrics2-reporter</artifactId> </exclusion> <exclusion> <groupId>io.netty</groupId> <artifactId>netty-all</artifactId> </exclusion> <exclusion> <groupId>com.google.code.gson</groupId> <artifactId>gson</artifactId> </exclusion> <exclusion> <groupId>com.google.code.findbugs</groupId> <artifactId>jsr305</artifactId> </exclusion> <exclusion> <groupId>io.dropwizard.metrics</groupId> <artifactId>metrics-core</artifactId> </exclusion> <exclusion> <groupId>javax.servlet</groupId> <artifactId>servlet-api</artifactId> </exclusion> <exclusion> <groupId>org.apache.avro</groupId> <artifactId>avro</artifactId> </exclusion> <exclusion> <groupId>org.apache.commons</groupId> <artifactId>commons-compress</artifactId> </exclusion> <exclusion> <groupId>org.apache.curator</groupId> <artifactId>curator-client</artifactId> </exclusion> <exclusion> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-auth</artifactId> </exclusion> <exclusion> <groupId>org.apache.hadoop</groupId> 
<artifactId>hadoop-mapreduce-client-core</artifactId> </exclusion> <exclusion> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-yarn-api</artifactId> </exclusion> <exclusion> <groupId>org.apache.zookeeper</groupId> <artifactId>zookeeper</artifactId> </exclusion> <exclusion> <groupId>org.codehaus.jackson</groupId> <artifactId>jackson-jaxrs</artifactId> </exclusion> <exclusion> <groupId>org.codehaus.jackson</groupId> <artifactId>jackson-xc</artifactId> </exclusion> <exclusion> <groupId>com.google.protobuf</groupId> <artifactId>protobuf-java</artifactId> </exclusion> <exclusion> <groupId>org.json</groupId> <artifactId>json</artifactId> </exclusion> <exclusion> <artifactId>log4j-slf4j-impl</artifactId> <groupId>org.apache.logging.log4j</groupId> </exclusion> <exclusion> <artifactId>javax.servlet</artifactId> <groupId>org.eclipse.jetty.orbit</groupId> </exclusion> <exclusion> <artifactId>servlet-api-2.5</artifactId> <groupId>org.mortbay.jetty</groupId> </exclusion> <exclusion> <artifactId>jasper-runtime</artifactId> <groupId>tomcat</groupId> </exclusion> <exclusion> <artifactId>slider-core</artifactId> <groupId>org.apache.slider</groupId> </exclusion> <exclusion> <artifactId>hbase-server</artifactId> <groupId>org.apache.hbase</groupId> </exclusion> <exclusion> <artifactId>jersey-client</artifactId> <groupId>com.sun.jersey</groupId> </exclusion> <exclusion> <artifactId>jersey-core</artifactId> <groupId>com.sun.jersey</groupId> </exclusion> <exclusion> <artifactId>jersey-json</artifactId> <groupId>com.sun.jersey</groupId> </exclusion> <exclusion> <artifactId>jersey-server</artifactId> <groupId>com.sun.jersey</groupId> </exclusion> <exclusion> <artifactId>jersey-guice</artifactId> <groupId>com.sun.jersey.contribs</groupId> </exclusion> <exclusion> <artifactId>hbase-common</artifactId> <groupId>org.apache.hbase</groupId> </exclusion> <exclusion> <artifactId>hbase-hadoop2-compat</artifactId> <groupId>org.apache.hbase</groupId> </exclusion> <exclusion> <artifactId>hbase-client</artifactId> <groupId>org.apache.hbase</groupId> </exclusion> <exclusion> <artifactId>hbase-hadoop-compat</artifactId> <groupId>org.apache.hbase</groupId> </exclusion> <exclusion> <artifactId>tephra-hbase-compat-1.0</artifactId> <groupId>co.cask.tephra</groupId> </exclusion> <exclusion> <artifactId>jaxb-api</artifactId> <groupId>javax.xml.bind</groupId> </exclusion> <exclusion> <artifactId>hive-llap-client</artifactId> <groupId>org.apache.hive</groupId> </exclusion> <exclusion> <artifactId>hive-llap-common</artifactId> <groupId>org.apache.hive</groupId> </exclusion> <exclusion> <artifactId>hive-llap-server</artifactId> <groupId>org.apache.hive</groupId> </exclusion> <exclusion> <artifactId>tephra-core</artifactId> <groupId>co.cask.tephra</groupId> </exclusion> <exclusion> <artifactId>ant</artifactId> <groupId>ant</groupId> </exclusion> <exclusion> <artifactId>stringtemplate</artifactId> <groupId>org.antlr</groupId> </exclusion> <exclusion> <artifactId>antlr-runtime</artifactId> <groupId>org.antlr</groupId> </exclusion> <exclusion> <artifactId>hive-shims</artifactId> <groupId>org.apache.hive</groupId> </exclusion> <exclusion> <artifactId>jsp-api</artifactId> <groupId>javax.servlet</groupId> </exclusion> <exclusion> <artifactId>log4j-api</artifactId> <groupId>org.apache.logging.log4j</groupId> </exclusion> <exclusion> <artifactId>log4j-core</artifactId> <groupId>org.apache.logging.log4j</groupId> </exclusion> <exclusion> <artifactId>log4j-web</artifactId> <groupId>org.apache.logging.log4j</groupId> </exclusion> <exclusion> 
<artifactId>jasper-compiler</artifactId> <groupId>tomcat</groupId> </exclusion> </exclusions> </dependency> </dependencies> <build> <finalName>dolphinscheduler-task-api-${project.version}</finalName> </build> </project>
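For context on the error reported in this issue: the stack trace shows PasswordUtils failing in its static initializer on org.apache.commons.codec.binary.Base64, so commons-codec must be visible to the task plugin's classloader at runtime. A minimal sketch of the kind of call that triggers that class load (illustrative only; not the exact PasswordUtils implementation):

```java
import java.nio.charset.StandardCharsets;

import org.apache.commons.codec.binary.Base64;

public class CodecProbe {
    public static void main(String[] args) {
        // Referencing Base64 here throws NoClassDefFoundError if commons-codec
        // is missing from the classpath, which is the failure mode in the issue.
        String encoded = Base64.encodeBase64String("secret".getBytes(StandardCharsets.UTF_8));
        String decoded = new String(Base64.decodeBase64(encoded), StandardCharsets.UTF_8);
        System.out.println(encoded + " -> " + decoded);
    }
}
```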
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,424
[Feature][UI] sort the task plugin order.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Sort the task plugin order: move the WATERDROP plugin to the end of the list. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6424
https://github.com/apache/dolphinscheduler/pull/6425
a9d805d79117047e571deeb52fbf96531c19674b
9c07c7d7ed3bf9387b2e9986a56be560595018a1
"2021-09-30T03:24:33Z"
java
"2021-09-30T10:23:42Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/config.js
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import i18n from '@/module/i18n' /** * Operation bar config * @code code * @icon icon * @disable disable * @desc tooltip */ const toolOper = (dagThis) => { const disabled = !!dagThis.$store.state.dag.isDetails// Permissions.getAuth() === false ? false : !dagThis.$store.state.dag.isDetails return [ { code: 'pointer', icon: 'el-icon-thumb', disable: disabled, desc: `${i18n.$t('Drag Nodes and Selected Items')}` }, { code: 'line', icon: 'el-icon-top-right', disable: disabled, desc: `${i18n.$t('Select Line Connection')}` }, { code: 'remove', icon: 'el-icon-delete', disable: disabled, desc: `${i18n.$t('Delete selected lines or nodes')}` }, { code: 'download', icon: 'el-icon-download', disable: !dagThis.type, desc: `${i18n.$t('Download')}` }, { code: 'screen', icon: 'el-icon-full-screen', disable: false, desc: `${i18n.$t('Full Screen')}` } ] } /** * Post status * @id Front end definition id * @desc tooltip * @code Backend definition identifier */ const publishStatus = [ { id: 0, desc: `${i18n.$t('Unpublished')}`, code: 'NOT_RELEASE' }, { id: 1, desc: `${i18n.$t('online')}`, code: 'ONLINE' }, { id: 2, desc: `${i18n.$t('offline')}`, code: 'OFFLINE' } ] /** * Operation type * @desc tooltip * @code identifier */ const runningType = [ { desc: `${i18n.$t('Start Process')}`, code: 'START_PROCESS' }, { desc: `${i18n.$t('Execute from the current node')}`, code: 'START_CURRENT_TASK_PROCESS' }, { desc: `${i18n.$t('Recover tolerance fault process')}`, code: 'RECOVER_TOLERANCE_FAULT_PROCESS' }, { desc: `${i18n.$t('Resume the suspension process')}`, code: 'RECOVER_SUSPENDED_PROCESS' }, { desc: `${i18n.$t('Execute from the failed nodes')}`, code: 'START_FAILURE_TASK_PROCESS' }, { desc: `${i18n.$t('Complement Data')}`, code: 'COMPLEMENT_DATA' }, { desc: `${i18n.$t('Scheduling execution')}`, code: 'SCHEDULER' }, { desc: `${i18n.$t('Rerun')}`, code: 'REPEAT_RUNNING' }, { desc: `${i18n.$t('Pause')}`, code: 'PAUSE' }, { desc: `${i18n.$t('Stop')}`, code: 'STOP' }, { desc: `${i18n.$t('Recovery waiting thread')}`, code: 'RECOVER_WAITING_THREAD' } ] /** * Task status * @key key * @id id * @desc tooltip * @color color * @icoUnicode iconfont * @isSpin is loading (Need to execute the code block to write if judgment) */ const tasksState = { SUBMITTED_SUCCESS: { id: 0, desc: `${i18n.$t('Submitted successfully')}`, color: '#A9A9A9', icoUnicode: 'ri-record-circle-fill', isSpin: false }, RUNNING_EXECUTION: { id: 1, desc: `${i18n.$t('Executing')}`, color: '#0097e0', icoUnicode: 'el-icon-s-tools', isSpin: true }, READY_PAUSE: { id: 2, desc: `${i18n.$t('Ready to pause')}`, color: '#07b1a3', icoUnicode: 'ri-settings-3-line', isSpin: false }, PAUSE: { id: 3, desc: `${i18n.$t('Pause')}`, color: '#057c72', icoUnicode: 'el-icon-video-pause', isSpin: false }, READY_STOP: 
{ id: 4, desc: `${i18n.$t('Ready to stop')}`, color: '#FE0402', icoUnicode: 'ri-stop-circle-fill', isSpin: false }, STOP: { id: 5, desc: `${i18n.$t('Stop')}`, color: '#e90101', icoUnicode: 'ri-stop-circle-line', isSpin: false }, FAILURE: { id: 6, desc: `${i18n.$t('Failed')}`, color: '#000000', icoUnicode: 'el-icon-circle-close', isSpin: false }, SUCCESS: { id: 7, desc: `${i18n.$t('Success')}`, color: '#33cc00', icoUnicode: 'el-icon-circle-check', isSpin: false }, NEED_FAULT_TOLERANCE: { id: 8, desc: `${i18n.$t('Need fault tolerance')}`, color: '#FF8C00', icoUnicode: 'el-icon-edit', isSpin: false }, KILL: { id: 9, desc: `${i18n.$t('Kill')}`, color: '#a70202', icoUnicode: 'el-icon-remove-outline', isSpin: false }, WAITING_THREAD: { id: 10, desc: `${i18n.$t('Waiting for thread')}`, color: '#912eed', icoUnicode: 'ri-time-line', isSpin: false }, WAITING_DEPEND: { id: 11, desc: `${i18n.$t('Waiting for dependence')}`, color: '#5101be', icoUnicode: 'ri-send-to-back', isSpin: false }, DELAY_EXECUTION: { id: 12, desc: `${i18n.$t('Delay execution')}`, color: '#5102ce', icoUnicode: 'ri-pause-circle-fill', isSpin: false }, FORCED_SUCCESS: { id: 13, desc: `${i18n.$t('Forced success')}`, color: '#5102ce', icoUnicode: 'el-icon-success', isSpin: false } } /** * Node type * @key key * @desc tooltip * @color color (tree and gantt) */ const tasksType = { SHELL: { desc: 'SHELL', color: '#646464' }, WATERDROP: { desc: 'WATERDROP', color: '#646465' }, SUB_PROCESS: { desc: 'SUB_PROCESS', color: '#0097e0' }, PROCEDURE: { desc: 'PROCEDURE', color: '#525CCD' }, SQL: { desc: 'SQL', color: '#7A98A1' }, SPARK: { desc: 'SPARK', color: '#E46F13' }, FLINK: { desc: 'FLINK', color: '#E46F13' }, MR: { desc: 'MapReduce', color: '#A0A5CC' }, PYTHON: { desc: 'PYTHON', color: '#FED52D' }, DEPENDENT: { desc: 'DEPENDENT', color: '#2FBFD8' }, HTTP: { desc: 'HTTP', color: '#E46F13' }, DATAX: { desc: 'DataX', color: '#1fc747' }, TIS: { desc: 'TIS', color: '#1fc747' }, SQOOP: { desc: 'SQOOP', color: '#E46F13' }, CONDITIONS: { desc: 'CONDITIONS', color: '#E46F13' }, SWITCH: { desc: 'SWITCH', color: '#E46F13' } } export { toolOper, publishStatus, runningType, tasksState, tasksType }
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,424
[Feature][UI] sort the task plugin order.
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Sort the task plugin order: move the WATERDROP plugin to the end of the list. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6424
https://github.com/apache/dolphinscheduler/pull/6425
a9d805d79117047e571deeb52fbf96531c19674b
9c07c7d7ed3bf9387b2e9986a56be560595018a1
"2021-09-30T03:24:33Z"
java
"2021-09-30T10:23:42Z"
dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/formModel/formModel.vue
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ <template> <div class="form-model-wrapper" v-clickoutside="_handleClose"> <div class="title-box"> <span class="name">{{ $t("Current node settings") }}</span> <span class="go-subtask"> <!-- Component can't pop up box to do component processing --> <m-log v-if="type === 'instance' && taskInstance" :item="backfillItem" :task-instance-id="taskInstance.id" > <template slot="history" ><a href="javascript:" @click="_seeHistory" ><em class="ansicon el-icon-alarm-clock"></em ><em>{{ $t("View history") }}</em></a ></template > <template slot="log" ><a href="javascript:" ><em class="ansicon el-icon-document"></em ><em>{{ $t("View log") }}</em></a ></template > </m-log> <a href="javascript:" @click="_goSubProcess" v-if="_isGoSubProcess" ><em class="ansicon ri-node-tree"></em ><em>{{ $t("Enter this child node") }}</em></a > </span> </div> <div class="content-box" v-if="isContentBox"> <div class="form-model"> <!-- Reference from task --> <!-- <reference-from-task :taskType="nodeData.taskType" /> --> <!-- Node name --> <m-list-box> <div slot="text">{{ $t("Node name") }}</div> <div slot="content"> <el-input type="text" v-model="name" size="small" :disabled="isDetails" :placeholder="$t('Please enter name (required)')" maxlength="100" @blur="_verifName()" > </el-input> </div> </m-list-box> <!-- Running sign --> <m-list-box> <div slot="text">{{ $t("Run flag") }}</div> <div slot="content"> <el-radio-group v-model="runFlag" size="small"> <el-radio :label="'YES'" :disabled="isDetails">{{ $t("Normal") }}</el-radio> <el-radio :label="'NO'" :disabled="isDetails">{{ $t("Prohibition execution") }}</el-radio> </el-radio-group> </div> </m-list-box> <!-- description --> <m-list-box> <div slot="text">{{ $t("Description") }}</div> <div slot="content"> <el-input :rows="2" type="textarea" :disabled="isDetails" v-model="desc" :placeholder="$t('Please enter description')" > </el-input> </div> </m-list-box> <!-- Task priority --> <m-list-box> <div slot="text">{{ $t("Task priority") }}</div> <div slot="content"> <span class="label-box" style="width: 193px; display: inline-block"> <m-priority v-model="taskInstancePriority"></m-priority> </span> </div> </m-list-box> <!-- Worker group and environment --> <m-list-box> <div slot="text">{{ $t("Worker group") }}</div> <div slot="content"> <span class="label-box" style="width: 193px; display: inline-block"> <m-worker-groups v-model="workerGroup"></m-worker-groups> </span> <span class="text-b">{{ $t("Environment Name") }}</span> <m-related-environment v-model="environmentCode" :workerGroup="workerGroup" :isNewCreate="isNewCreate" v-on:environmentCodeEvent="_onUpdateEnvironmentCode" ></m-related-environment> </div> </m-list-box> <!-- Number of failed retries --> <m-list-box v-if="nodeData.taskType !== 'SUB_PROCESS'"> 
<div slot="text">{{ $t("Number of failed retries") }}</div> <div slot="content"> <m-select-input v-model="maxRetryTimes" :list="[0, 1, 2, 3, 4]" ></m-select-input> <span>({{ $t("Times") }})</span> <span class="text-b">{{ $t("Failed retry interval") }}</span> <m-select-input v-model="retryInterval" :list="[1, 10, 30, 60, 120]" ></m-select-input> <span>({{ $t("Minute") }})</span> </div> </m-list-box> <!-- Delay execution time --> <m-list-box v-if=" nodeData.taskType !== 'SUB_PROCESS' && nodeData.taskType !== 'CONDITIONS' && nodeData.taskType !== 'DEPENDENT' && nodeData.taskType !== 'SWITCH' " > <div slot="text">{{ $t("Delay execution time") }}</div> <div slot="content"> <m-select-input v-model="delayTime" :list="[0, 1, 5, 10]" ></m-select-input> <span>({{ $t("Minute") }})</span> </div> </m-list-box> <!-- Branch flow --> <m-list-box v-if="nodeData.taskType === 'CONDITIONS'"> <div slot="text">{{ $t("State") }}</div> <div slot="content"> <span class="label-box" style="width: 193px; display: inline-block"> <el-select style="width: 157px" size="small" v-model="successNode" :disabled="true" > <el-option v-for="item in stateList" :key="item.value" :value="item.value" :label="item.label" ></el-option> </el-select> </span> <span class="text-b" style="padding-left: 38px">{{ $t("Branch flow") }}</span> <el-select style="width: 157px" size="small" v-model="successBranch" clearable :disabled="isDetails" > <el-option v-for="item in postTasks" :key="item.code" :value="item.name" :label="item.name" ></el-option> </el-select> </div> </m-list-box> <m-list-box v-if="nodeData.taskType === 'CONDITIONS'"> <div slot="text">{{ $t("State") }}</div> <div slot="content"> <span class="label-box" style="width: 193px; display: inline-block"> <el-select style="width: 157px" size="small" v-model="failedNode" :disabled="true" > <el-option v-for="item in stateList" :key="item.value" :value="item.value" :label="item.label" ></el-option> </el-select> </span> <span class="text-b" style="padding-left: 38px">{{ $t("Branch flow") }}</span> <el-select style="width: 157px" size="small" v-model="failedBranch" clearable :disabled="isDetails" > <el-option v-for="item in postTasks" :key="item.code" :value="item.name" :label="item.name" ></el-option> </el-select> </div> </m-list-box> <div v-if="backfillRefresh"> <!-- Task timeout alarm --> <m-timeout-alarm v-if="nodeData.taskType !== 'DEPENDENT'" ref="timeout" :backfill-item="backfillItem" @on-timeout="_onTimeout" > </m-timeout-alarm> <!-- Dependent timeout alarm --> <m-dependent-timeout v-if="nodeData.taskType === 'DEPENDENT'" ref="dependentTimeout" :backfill-item="backfillItem" @on-timeout="_onDependentTimeout" > </m-dependent-timeout> <!-- shell node --> <m-shell v-if="nodeData.taskType === 'SHELL'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="SHELL" :backfill-item="backfillItem" > </m-shell> <!-- waterdrop node --> <m-waterdrop v-if="nodeData.taskType === 'WATERDROP'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="WATERDROP" :backfill-item="backfillItem" > </m-waterdrop> <!-- sub_process node --> <m-sub-process v-if="nodeData.taskType === 'SUB_PROCESS'" @on-params="_onParams" @on-cache-params="_onCacheParams" @on-set-process-name="_onSetProcessName" ref="SUB_PROCESS" :backfill-item="backfillItem" > </m-sub-process> <!-- procedure node --> <m-procedure v-if="nodeData.taskType === 'PROCEDURE'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="PROCEDURE" :backfill-item="backfillItem" > </m-procedure> <!-- sql node --> <m-sql 
v-if="nodeData.taskType === 'SQL'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="SQL" :create-node-id="nodeData.id" :backfill-item="backfillItem" > </m-sql> <!-- spark node --> <m-spark v-if="nodeData.taskType === 'SPARK'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="SPARK" :backfill-item="backfillItem" > </m-spark> <m-flink v-if="nodeData.taskType === 'FLINK'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="FLINK" :backfill-item="backfillItem" > </m-flink> <!-- mr node --> <m-mr v-if="nodeData.taskType === 'MR'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="MR" :backfill-item="backfillItem" > </m-mr> <!-- python node --> <m-python v-if="nodeData.taskType === 'PYTHON'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="PYTHON" :backfill-item="backfillItem" > </m-python> <!-- dependent node --> <m-dependent v-if="nodeData.taskType === 'DEPENDENT'" @on-dependent="_onDependent" @on-cache-dependent="_onCacheDependent" ref="DEPENDENT" :backfill-item="backfillItem" > </m-dependent> <m-http v-if="nodeData.taskType === 'HTTP'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="HTTP" :backfill-item="backfillItem" > </m-http> <m-datax v-if="nodeData.taskType === 'DATAX'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="DATAX" :backfill-item="backfillItem" > </m-datax> <m-tis v-if="nodeData.taskType === 'TIS'" @on-params="_onParams" @on-cache-params="_onCacheParams" :backfill-item="backfillItem" ref="TIS"> </m-tis> <m-sqoop v-if="nodeData.taskType === 'SQOOP'" @on-params="_onParams" @on-cache-params="_onCacheParams" ref="SQOOP" :backfill-item="backfillItem" > </m-sqoop> <m-conditions v-if="nodeData.taskType === 'CONDITIONS'" ref="CONDITIONS" @on-dependent="_onDependent" @on-cache-dependent="_onCacheDependent" :backfill-item="backfillItem" :prev-tasks="prevTasks" > </m-conditions> <m-switch v-if="nodeData.taskType === 'SWITCH'" ref="SWITCH" @on-switch-result="_onSwitchResult" :backfill-item="backfillItem" :nodeData="nodeData" ></m-switch> </div> <!-- Pre-tasks in workflow --> <m-pre-tasks ref="preTasks" v-if="['SHELL', 'SUB_PROCESS'].indexOf(nodeData.taskType) > -1" :code="code" /> </div> </div> <div class="bottom-box"> <div class="submit" style="background: #fff"> <el-button type="text" size="small" id="cancelBtn"> {{ $t("Cancel") }} </el-button> <el-button type="primary" size="small" round :loading="spinnerLoading" @click="ok()" :disabled="isDetails" >{{ spinnerLoading ? 
$t("Loading...") : $t("Confirm add") }} </el-button> </div> </div> </div> </template> <script> import _ from 'lodash' import { mapActions, mapState } from 'vuex' import mLog from './log' import mMr from './tasks/mr' import mSql from './tasks/sql' import i18n from '@/module/i18n' import mListBox from './tasks/_source/listBox' import mShell from './tasks/shell' import mWaterdrop from './tasks/waterdrop' import mSpark from './tasks/spark' import mFlink from './tasks/flink' import mPython from './tasks/python' import mProcedure from './tasks/procedure' import mDependent from './tasks/dependent' import mHttp from './tasks/http' import mDatax from './tasks/datax' import mTis from './tasks/tis' import mConditions from './tasks/conditions' import mSwitch from './tasks/switch.vue' import mSqoop from './tasks/sqoop' import mSubProcess from './tasks/sub_process' import mSelectInput from './_source/selectInput' import mTimeoutAlarm from './_source/timeoutAlarm' import mDependentTimeout from './_source/dependentTimeout' import mWorkerGroups from './_source/workerGroups' import mRelatedEnvironment from './_source/relatedEnvironment' import mPreTasks from './tasks/pre_tasks' import clickoutside from '@/module/util/clickoutside' import disabledState from '@/module/mixin/disabledState' import mPriority from '@/module/components/priority/priority' import { findComponentDownward } from '@/module/util/' // import ReferenceFromTask from './_source/referenceFromTask.vue' export default { name: 'form-model', data () { return { // loading spinnerLoading: false, // node name name: '', // description desc: '', // Node echo data backfillItem: {}, cacheBackfillItem: {}, // Resource(list) resourcesList: [], successNode: 'success', failedNode: 'failed', successBranch: '', failedBranch: '', conditionResult: { successNode: [], failedNode: [] }, switchResult: {}, // dependence dependence: {}, // cache dependence cacheDependence: {}, // task code code: 0, // Current node params data params: {}, // Running sign runFlag: 'YES', // The second echo problem caused by the node data is specifically which node hook caused the unfinished special treatment isContentBox: false, // Number of failed retries maxRetryTimes: '0', // Failure retry interval retryInterval: '1', // Delay execution time delayTime: '0', // Task timeout alarm timeout: {}, // (For Dependent nodes) Wait start timeout alarm waitStartTimeout: {}, // Task priority taskInstancePriority: 'MEDIUM', // worker group id workerGroup: 'default', // selected environment environmentCode: '', selectedWorkerGroup: '', stateList: [ { value: 'success', label: `${i18n.$t('Success')}` }, { value: 'failed', label: `${i18n.$t('Failed')}` } ], // for CONDITIONS postTasks: [], prevTasks: [], // refresh part of the formModel, after set backfillItem outside backfillRefresh: true, // whether this is a new Task isNewCreate: true } }, provide () { return { formModel: this } }, /** * Click on events that are not generated internally by the component */ directives: { clickoutside }, mixins: [disabledState], props: { nodeData: Object, type: { type: String, default: '' } }, inject: ['dagChart'], methods: { ...mapActions('dag', ['getTaskInstanceList']), taskToBackfillItem (task) { return { code: task.code, conditionResult: task.taskParams.conditionResult, delayTime: task.delayTime, dependence: task.taskParams.dependence, desc: task.description, id: task.id, maxRetryTimes: task.failRetryTimes, name: task.name, params: _.omit(task.taskParams, [ 'conditionResult', 'dependence', 'waitStartTimeout' 
]), retryInterval: task.failRetryInterval, runFlag: task.flag, taskInstancePriority: task.taskPriority, timeout: { interval: task.timeout, strategy: task.timeoutNotifyStrategy, enable: task.timeoutFlag === 'OPEN' }, type: task.taskType, waitStartTimeout: task.taskParams.waitStartTimeout, workerGroup: task.workerGroup, environmentCode: task.environmentCode } }, /** * depend */ _onDependent (o) { this.dependence = Object.assign(this.dependence, {}, o) }, _onSwitchResult (o) { this.switchResult = o }, /** * cache dependent */ _onCacheDependent (o) { this.cacheDependence = Object.assign(this.cacheDependence, {}, o) }, /** * Task timeout alarm */ _onTimeout (o) { this.timeout = Object.assign(this.timeout, {}, o) }, /** * Dependent timeout alarm */ _onDependentTimeout (o) { this.timeout = Object.assign(this.timeout, {}, o.waitCompleteTimeout) this.waitStartTimeout = Object.assign( this.waitStartTimeout, {}, o.waitStartTimeout ) }, /** * Click external to close the current component */ _handleClose () { // this.close() }, /** * Jump to task instance */ _seeHistory () { this.$emit('seeHistory', this.backfillItem.name) }, /** * Enter the child node to judge the process instance or the process definition * @param type = instance */ _goSubProcess () { if (_.isEmpty(this.backfillItem)) { this.$message.warning( `${i18n.$t( 'The newly created sub-Process has not yet been executed and cannot enter the sub-Process' )}` ) return } if (this.router.history.current.name === 'projects-instance-details') { if (!this.taskInstance) { this.$message.warning( `${i18n.$t( 'The task has not been executed and cannot enter the sub-Process' )}` ) return } this.store .dispatch('dag/getSubProcessId', { taskId: this.taskInstance.id }) .then((res) => { this.$emit('onSubProcess', { subInstanceId: res.data.subProcessInstanceId, fromThis: this }) }) .catch((e) => { this.$message.error(e.msg || '') }) } else { const processDefinitionId = this.backfillItem.params.processDefinitionId const process = this.processListS.find( (process) => process.processDefinition.id === processDefinitionId ) this.$emit('onSubProcess', { subProcessCode: process.processDefinition.code, fromThis: this }) } }, _onUpdateWorkerGroup (o) { this.selectedWorkerGroup = o }, /** * return params */ _onParams (o) { this.params = Object.assign({}, o) }, _onUpdateEnvironmentCode (o) { this.environmentCode = o }, /** * _onCacheParams is reserved */ _onCacheParams (o) { this.params = Object.assign(this.params, {}, o) }, /** * verification name */ _verifName () { if (!_.trim(this.name)) { this.$message.warning(`${i18n.$t('Please enter name (required)')}`) return false } if ( this.successBranch !== '' && this.successBranch !== null && this.successBranch === this.failedBranch ) { this.$message.warning( `${i18n.$t( 'Cannot select the same node for successful branch flow and failed branch flow' )}` ) return false } if (this.name === this.backfillItem.name) { return true } // Name repeat depends on dom backfill dependent store const tasks = this.store.state.dag.tasks const task = tasks.find((t) => t.name === this.name) if (task) { this.$message.warning(`${i18n.$t('Name already exists')}`) return false } return true }, _verifWorkGroup () { let item = this.store.state.security.workerGroupsListAll.find((item) => { return item.id === this.workerGroup }) if (item === undefined) { this.$message.warning( `${i18n.$t( 'The Worker group no longer exists, please select the correct Worker group!' 
)}` ) return false } return true }, /** * Global verification procedure */ _verification () { // Verify name if (!this._verifName()) { return } // verif workGroup if (!this._verifWorkGroup()) { return } // Verify task alarm parameters if (this.nodeData.taskType === 'DEPENDENT') { if (!this.$refs.dependentTimeout._verification()) { return } } else { if (!this.$refs.timeout._verification()) { return } } // Verify node parameters if (!this.$refs[this.nodeData.taskType]._verification()) { return } // set preTask if (this.$refs.preTasks) { this.$refs.preTasks.setPreNodes() } this.conditionResult.successNode[0] = this.successBranch this.conditionResult.failedNode[0] = this.failedBranch this.$emit('addTaskInfo', { item: { code: this.nodeData.id, name: this.name, description: this.desc, taskType: this.nodeData.taskType, taskParams: { ...this.params, dependence: this.cacheDependence, conditionResult: this.conditionResult, waitStartTimeout: this.waitStartTimeout }, flag: this.runFlag, taskPriority: this.taskInstancePriority, workerGroup: this.workerGroup, failRetryTimes: this.maxRetryTimes, failRetryInterval: this.retryInterval, timeoutFlag: this.timeout.enable ? 'OPEN' : 'CLOSE', timeoutNotifyStrategy: this.timeout.strategy, timeout: this.timeout.interval || 0, delayTime: this.delayTime, environmentCode: this.environmentCode || -1, status: this.status, branch: this.branch }, fromThis: this }) // set run flag this._setRunFlag() // set edge label this._setEdgeLabel() }, /** * Sub-workflow selected node echo name */ _onSetProcessName (name) { this.name = name }, /** * set run flag * TODO */ _setRunFlag () {}, _setEdgeLabel () { if (this.successBranch || this.failedBranch) { const canvas = findComponentDownward(this.dagChart, 'dag-canvas') const edges = canvas.getEdges() const successTask = this.postTasks.find( (t) => t.name === this.successBranch ) const failedTask = this.postTasks.find( (t) => t.name === this.failedBranch ) const sEdge = edges.find( (edge) => successTask && edge.sourceId === this.code && edge.targetId === successTask.code ) const fEdge = edges.find( (edge) => failedTask && edge.sourceId === this.code && edge.targetId === failedTask.code ) sEdge && canvas.setEdgeLabel(sEdge.id, this.$t('Success')) fEdge && canvas.setEdgeLabel(fEdge.id, this.$t('Failed')) } }, /** * Submit verification */ ok () { this._verification() }, /** * Close and destroy component and component internal events */ close () { let flag = false // Delete node without storage if (!this.backfillItem.name) { flag = true } this.isContentBox = false // flag Whether to delete a node this.$destroy() this.$emit('close', { item: this.cacheBackfillItem, flag: flag, fromThis: this }) }, backfill (backfillItem) { const o = backfillItem // Non-null objects represent backfill if (!_.isEmpty(o)) { this.code = o.code this.name = o.name this.taskInstancePriority = o.taskInstancePriority this.runFlag = o.runFlag || 'YES' this.desc = o.desc this.maxRetryTimes = o.maxRetryTimes this.retryInterval = o.retryInterval this.delayTime = o.delayTime if (o.conditionResult) { this.successBranch = o.conditionResult.successNode[0] this.failedBranch = o.conditionResult.failedNode[0] } // If the workergroup has been deleted, set the default workergroup for ( let i = 0; i < this.store.state.security.workerGroupsListAll.length; i++ ) { let workerGroup = this.store.state.security.workerGroupsListAll[i].id if (o.workerGroup === workerGroup) { break } } if (o.workerGroup === undefined) { this.store .dispatch('dag/getTaskInstanceList', { pageSize: 10, 
pageNo: 1, processInstanceId: this.nodeData.instanceId, name: o.name }) .then((res) => { this.workerGroup = res.totalList[0].workerGroup }) } else { this.workerGroup = o.workerGroup } this.environmentCode = o.environmentCode === -1 ? '' : o.environmentCode this.params = o.params || {} this.dependence = o.dependence || {} this.cacheDependence = o.dependence || {} } else { this.workerGroup = this.store.state.security.workerGroupsListAll[0].id } this.cacheBackfillItem = JSON.parse(JSON.stringify(o)) this.isContentBox = true } }, created () { // Backfill data let taskList = this.store.state.dag.tasks let o = {} if (taskList.length) { taskList.forEach((task) => { if (task.code === this.nodeData.id) { const backfillItem = this.taskToBackfillItem(task) o = backfillItem this.backfillItem = backfillItem this.isNewCreate = false } }) } this.code = this.nodeData.id this.backfill(o) if (this.dagChart) { const canvas = findComponentDownward(this.dagChart, 'dag-canvas') const postNodes = canvas.getPostNodes(this.code) const prevNodes = canvas.getPrevNodes(this.code) const buildTask = (node) => ({ code: node.id, name: node.data.taskName, type: node.data.taskType }) this.postTasks = postNodes.map(buildTask) this.prevTasks = prevNodes.map(buildTask) } }, mounted () { let self = this $('#cancelBtn').mousedown(function (event) { event.preventDefault() self.close() }) }, updated () {}, beforeDestroy () {}, destroyed () {}, computed: { ...mapState('dag', ['processListS', 'taskInstances']), /** * Child workflow entry show/hide */ _isGoSubProcess () { return this.nodeData.taskType === 'SUB_PROCESS' && this.name }, taskInstance () { if (this.taskInstances.length > 0) { return this.taskInstances.find( (instance) => instance.taskCode === this.nodeData.id ) } return null } }, components: { mListBox, mMr, mShell, mWaterdrop, mSubProcess, mProcedure, mSql, mLog, mSpark, mFlink, mPython, mDependent, mHttp, mDatax, mTis, mSqoop, mConditions, mSwitch, mSelectInput, mTimeoutAlarm, mDependentTimeout, mPriority, mWorkerGroups, mRelatedEnvironment, mPreTasks // ReferenceFromTask } } </script> <style lang="scss" rel="stylesheet/scss"> @import "./formModel"; .ans-radio-disabled { .ans-radio-inner:after { background-color: #6f8391; } } </style>
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,438
[Improvement][Worker] remove meaningless DB query
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Remove a meaningless DB query in org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread. During the task-kill process it queries the DB to obtain the task instance object, but this check does not make sense: the relevant context is already available, so there is no need to query the DB. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6438
https://github.com/apache/dolphinscheduler/pull/6440
d284e44d52f75bd6c5f5756d9b7ae4a5af24fadc
ea2a8d26a0d42683309b8bda9ef24f4daaab266b
"2021-10-01T17:01:38Z"
java
"2021-10-02T05:09:00Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/builder/TaskExecutionContextBuilder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.builder; import static org.apache.dolphinscheduler.common.Constants.SEC_2_MINUTES_TIME_UNIT; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.enums.TimeoutFlag; import org.apache.dolphinscheduler.dao.entity.ProcessDefinition; import org.apache.dolphinscheduler.dao.entity.ProcessInstance; import org.apache.dolphinscheduler.dao.entity.TaskDefinition; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.service.queue.entity.TaskExecutionContext; import org.apache.dolphinscheduler.spi.task.request.DataxTaskExecutionContext; import org.apache.dolphinscheduler.spi.task.request.ProcedureTaskExecutionContext; import org.apache.dolphinscheduler.spi.task.request.SQLTaskExecutionContext; import org.apache.dolphinscheduler.spi.task.request.SqoopTaskExecutionContext; /** * TaskExecutionContext builder */ public class TaskExecutionContextBuilder { public static TaskExecutionContextBuilder get() { return new TaskExecutionContextBuilder(); } private TaskExecutionContext taskExecutionContext = new TaskExecutionContext(); /** * build taskInstance related info * * @param taskInstance taskInstance * @return TaskExecutionContextBuilder */ public TaskExecutionContextBuilder buildTaskInstanceRelatedInfo(TaskInstance taskInstance) { taskExecutionContext.setTaskInstanceId(taskInstance.getId()); taskExecutionContext.setTaskName(taskInstance.getName()); taskExecutionContext.setFirstSubmitTime(taskInstance.getFirstSubmitTime()); taskExecutionContext.setStartTime(taskInstance.getStartTime()); taskExecutionContext.setTaskType(taskInstance.getTaskType()); taskExecutionContext.setLogPath(taskInstance.getLogPath()); taskExecutionContext.setWorkerGroup(taskInstance.getWorkerGroup()); taskExecutionContext.setEnvironmentConfig(taskInstance.getEnvironmentConfig()); taskExecutionContext.setHost(taskInstance.getHost()); taskExecutionContext.setResources(taskInstance.getResources()); taskExecutionContext.setDelayTime(taskInstance.getDelayTime()); taskExecutionContext.setVarPool(taskInstance.getVarPool()); return this; } public TaskExecutionContextBuilder buildTaskDefinitionRelatedInfo(TaskDefinition taskDefinition) { taskExecutionContext.setTaskTimeout(Integer.MAX_VALUE); if (taskDefinition.getTimeoutFlag() == TimeoutFlag.OPEN) { taskExecutionContext.setTaskTimeoutStrategy(taskDefinition.getTimeoutNotifyStrategy()); if (taskDefinition.getTimeoutNotifyStrategy() == TaskTimeoutStrategy.FAILED || taskDefinition.getTimeoutNotifyStrategy() == TaskTimeoutStrategy.WARNFAILED) { taskExecutionContext.setTaskTimeout(Math.min(taskDefinition.getTimeout() * SEC_2_MINUTES_TIME_UNIT, Integer.MAX_VALUE)); } } 
taskExecutionContext.setTaskParams(taskDefinition.getTaskParams()); return this; } /** * build processInstance related info * * @param processInstance processInstance * @return TaskExecutionContextBuilder */ public TaskExecutionContextBuilder buildProcessInstanceRelatedInfo(ProcessInstance processInstance) { taskExecutionContext.setProcessInstanceId(processInstance.getId()); taskExecutionContext.setScheduleTime(processInstance.getScheduleTime()); taskExecutionContext.setGlobalParams(processInstance.getGlobalParams()); taskExecutionContext.setExecutorId(processInstance.getExecutorId()); taskExecutionContext.setCmdTypeIfComplement(processInstance.getCmdTypeIfComplement().getCode()); taskExecutionContext.setTenantCode(processInstance.getTenantCode()); taskExecutionContext.setQueue(processInstance.getQueue()); return this; } /** * build processDefinition related info * * @param processDefinition processDefinition * @return TaskExecutionContextBuilder */ public TaskExecutionContextBuilder buildProcessDefinitionRelatedInfo(ProcessDefinition processDefinition) { taskExecutionContext.setProcessDefineCode(processDefinition.getCode()); taskExecutionContext.setProcessDefineVersion(processDefinition.getVersion()); taskExecutionContext.setProjectCode(processDefinition.getProjectCode()); return this; } /** * build SQLTask related info * * @param sqlTaskExecutionContext sqlTaskExecutionContext * @return TaskExecutionContextBuilder */ public TaskExecutionContextBuilder buildSQLTaskRelatedInfo(SQLTaskExecutionContext sqlTaskExecutionContext) { taskExecutionContext.setSqlTaskExecutionContext(sqlTaskExecutionContext); return this; } /** * build DataxTask related info * * @param dataxTaskExecutionContext dataxTaskExecutionContext * @return TaskExecutionContextBuilder */ public TaskExecutionContextBuilder buildDataxTaskRelatedInfo(DataxTaskExecutionContext dataxTaskExecutionContext) { taskExecutionContext.setDataxTaskExecutionContext(dataxTaskExecutionContext); return this; } /** * build procedureTask related info * * @param procedureTaskExecutionContext procedureTaskExecutionContext * @return TaskExecutionContextBuilder */ public TaskExecutionContextBuilder buildProcedureTaskRelatedInfo(ProcedureTaskExecutionContext procedureTaskExecutionContext) { taskExecutionContext.setProcedureTaskExecutionContext(procedureTaskExecutionContext); return this; } /** * build sqoopTask related info * * @param sqoopTaskExecutionContext sqoopTaskExecutionContext * @return TaskExecutionContextBuilder */ public TaskExecutionContextBuilder buildSqoopTaskRelatedInfo(SqoopTaskExecutionContext sqoopTaskExecutionContext) { taskExecutionContext.setSqoopTaskExecutionContext(sqoopTaskExecutionContext); return this; } /** * create * * @return taskExecutionContext */ public TaskExecutionContext create() { return taskExecutionContext; } }
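For context, a hedged sketch of how this builder is typically chained when the master assembles a context for dispatch; the local variables (taskInstance, taskDefinition, processInstance, processDefinition) are illustrative assumptions, not a specific call site from the source:

// Hypothetical call site; each build* method fills one slice of the context
// and returns the builder, so the calls chain fluently.
TaskExecutionContext context = TaskExecutionContextBuilder.get()
        .buildTaskInstanceRelatedInfo(taskInstance)            // id, name, times, worker group, var pool
        .buildTaskDefinitionRelatedInfo(taskDefinition)        // timeout strategy and task params
        .buildProcessInstanceRelatedInfo(processInstance)      // schedule time, global params, tenant, queue
        .buildProcessDefinitionRelatedInfo(processDefinition)  // process/project codes and version
        .create();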
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,438
[Improvement][Worker] remove meaningless DB query
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Remove a meaningless DB query in org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread. During task kill, the worker queries the DB to fetch the task instance object, but that check serves no purpose: the relevant context is already known locally, so there is no need to query the DB. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6438
https://github.com/apache/dolphinscheduler/pull/6440
d284e44d52f75bd6c5f5756d9b7ae4a5af24fadc
ea2a8d26a0d42683309b8bda9ef24f4daaab266b
"2021-10-01T17:01:38Z"
java
"2021-10-02T05:09:00Z"
dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskExecuteThread.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.server.worker.runner; import static java.util.Calendar.DAY_OF_MONTH; import org.apache.dolphinscheduler.common.Constants; import org.apache.dolphinscheduler.common.enums.CommandType; import org.apache.dolphinscheduler.common.enums.Event; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.TaskType; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.utils.CommonUtils; import org.apache.dolphinscheduler.common.utils.DateUtils; import org.apache.dolphinscheduler.common.utils.HadoopUtils; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.common.utils.LoggerUtils; import org.apache.dolphinscheduler.common.utils.OSUtils; import org.apache.dolphinscheduler.common.utils.RetryerUtils; import org.apache.dolphinscheduler.dao.entity.TaskInstance; import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.remote.command.TaskExecuteAckCommand; import org.apache.dolphinscheduler.remote.command.TaskExecuteResponseCommand; import org.apache.dolphinscheduler.server.utils.ProcessUtils; import org.apache.dolphinscheduler.server.worker.cache.ResponceCache; import org.apache.dolphinscheduler.server.worker.plugin.TaskPluginManager; import org.apache.dolphinscheduler.server.worker.processor.TaskCallbackService; import org.apache.dolphinscheduler.service.alert.AlertClientService; import org.apache.dolphinscheduler.service.bean.SpringApplicationContext; import org.apache.dolphinscheduler.service.process.ProcessService; import org.apache.dolphinscheduler.service.queue.entity.TaskExecutionContext; import org.apache.dolphinscheduler.spi.exception.PluginNotFoundException; import org.apache.dolphinscheduler.spi.task.AbstractTask; import org.apache.dolphinscheduler.spi.task.TaskAlertInfo; import org.apache.dolphinscheduler.spi.task.TaskChannel; import org.apache.dolphinscheduler.spi.task.TaskExecutionContextCacheManager; import org.apache.dolphinscheduler.spi.task.request.TaskRequest; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.StringUtils; import java.io.File; import java.io.IOException; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.Delayed; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.github.rholder.retry.RetryException; /** * task scheduler thread */ public class TaskExecuteThread implements Runnable, Delayed { /** * logger */ private final Logger logger = 
LoggerFactory.getLogger(TaskExecuteThread.class); /** * task instance */ private TaskExecutionContext taskExecutionContext; /** * abstract task */ private AbstractTask task; /** * task callback service */ private TaskCallbackService taskCallbackService; /** * taskExecutionContextCacheManager */ private TaskExecutionContextCacheManager taskExecutionContextCacheManager; /** * task logger */ private Logger taskLogger; /** * alert client server */ private AlertClientService alertClientService; private TaskPluginManager taskPluginManager; /** * process database access */ protected ProcessService processService; /** * constructor * * @param taskExecutionContext taskExecutionContext * @param taskCallbackService taskCallbackService */ public TaskExecuteThread(TaskExecutionContext taskExecutionContext, TaskCallbackService taskCallbackService, AlertClientService alertClientService) { this.taskExecutionContext = taskExecutionContext; this.taskCallbackService = taskCallbackService; this.alertClientService = alertClientService; this.processService = SpringApplicationContext.getBean(ProcessService.class); } public TaskExecuteThread(TaskExecutionContext taskExecutionContext, TaskCallbackService taskCallbackService, AlertClientService alertClientService, TaskPluginManager taskPluginManager) { this.taskExecutionContext = taskExecutionContext; this.taskCallbackService = taskCallbackService; this.alertClientService = alertClientService; this.taskPluginManager = taskPluginManager; this.processService = SpringApplicationContext.getBean(ProcessService.class); } @Override public void run() { TaskExecuteResponseCommand responseCommand = new TaskExecuteResponseCommand(taskExecutionContext.getTaskInstanceId(), taskExecutionContext.getProcessInstanceId()); try { logger.info("script path : {}", taskExecutionContext.getExecutePath()); // check if the OS user exists if (!OSUtils.getUserList().contains(taskExecutionContext.getTenantCode())) { String errorLog = String.format("tenantCode: %s does not exist", taskExecutionContext.getTenantCode()); taskLogger.error(errorLog); responseCommand.setStatus(ExecutionStatus.FAILURE.getCode()); responseCommand.setEndTime(new Date()); return; } if (taskExecutionContext.getStartTime() == null) { taskExecutionContext.setStartTime(new Date()); } if (taskExecutionContext.getCurrentExecutionStatus() != ExecutionStatus.RUNNING_EXECUTION) { changeTaskExecutionStatusToRunning(); } logger.info("the task begins to execute. 
task instance id: {}", taskExecutionContext.getTaskInstanceId()); TaskInstance taskInstance = processService.findTaskInstanceById(taskExecutionContext.getTaskInstanceId()); int dryRun = taskInstance.getDryRun(); // copy hdfs/minio file to local if (dryRun == Constants.DRY_RUN_FLAG_NO) { downloadResource(taskExecutionContext.getExecutePath(), taskExecutionContext.getResources(), logger); } taskExecutionContext.setEnvFile(CommonUtils.getSystemEnvPath()); taskExecutionContext.setDefinedParams(getGlobalParamsMap()); taskExecutionContext.setTaskAppId(String.format("%s_%s", taskExecutionContext.getProcessInstanceId(), taskExecutionContext.getTaskInstanceId())); preBuildBusinessParams(); TaskChannel taskChannel = taskPluginManager.getTaskChannelMap().get(taskExecutionContext.getTaskType()); if (null == taskChannel) { throw new PluginNotFoundException(String.format("%s Task Plugin Not Found,Please Check Config File.", taskExecutionContext.getTaskType())); } TaskRequest taskRequest = JSONUtils.parseObject(JSONUtils.toJsonString(taskExecutionContext), TaskRequest.class); String taskLogName = LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX, taskExecutionContext.getProcessDefineCode(), taskExecutionContext.getProcessDefineVersion(), taskExecutionContext.getProcessInstanceId(), taskExecutionContext.getTaskInstanceId()); taskRequest.setTaskLogName(taskLogName); task = taskChannel.createTask(taskRequest); // task init this.task.init(); //init varPool this.task.getParameters().setVarPool(taskExecutionContext.getVarPool()); if (dryRun == Constants.DRY_RUN_FLAG_NO) { // task handle this.task.handle(); // task result process if (this.task.getNeedAlert()) { sendAlert(this.task.getTaskAlertInfo()); } responseCommand.setStatus(this.task.getExitStatus().getCode()); } else { responseCommand.setStatus(ExecutionStatus.SUCCESS.getCode()); task.setExitStatusCode(Constants.EXIT_CODE_SUCCESS); } responseCommand.setEndTime(new Date()); responseCommand.setProcessId(this.task.getProcessId()); responseCommand.setAppIds(this.task.getAppIds()); responseCommand.setVarPool(JSONUtils.toJsonString(this.task.getParameters().getVarPool())); logger.info("task instance id : {},task final status : {}", taskExecutionContext.getTaskInstanceId(), this.task.getExitStatus()); } catch (Throwable e) { logger.error("task scheduler failure", e); kill(); responseCommand.setStatus(ExecutionStatus.FAILURE.getCode()); responseCommand.setEndTime(new Date()); responseCommand.setProcessId(task.getProcessId()); responseCommand.setAppIds(task.getAppIds()); } finally { TaskExecutionContextCacheManager.removeByTaskInstanceId(taskExecutionContext.getTaskInstanceId()); ResponceCache.get().cache(taskExecutionContext.getTaskInstanceId(), responseCommand.convert2Command(), Event.RESULT); taskCallbackService.sendResult(taskExecutionContext.getTaskInstanceId(), responseCommand.convert2Command()); clearTaskExecPath(); } } private void sendAlert(TaskAlertInfo taskAlertInfo) { alertClientService.sendAlert(taskAlertInfo.getAlertGroupId(), taskAlertInfo.getTitle(), taskAlertInfo.getContent()); } /** * when task finish, clear execute path. 
*/ private void clearTaskExecPath() { logger.info("develop mode is: {}", CommonUtils.isDevelopMode()); if (!CommonUtils.isDevelopMode()) { // get exec dir String execLocalPath = taskExecutionContext.getExecutePath(); if (StringUtils.isEmpty(execLocalPath)) { logger.warn("task: {} exec local path is empty.", taskExecutionContext.getTaskName()); return; } if ("/".equals(execLocalPath)) { logger.warn("task: {} exec local path is '/', direct deletion is not allowed", taskExecutionContext.getTaskName()); return; } try { org.apache.commons.io.FileUtils.deleteDirectory(new File(execLocalPath)); logger.info("exec local path: {} cleared.", execLocalPath); } catch (IOException e) { logger.error("delete exec dir failed : {}", e.getMessage(), e); } } } /** * get global paras map * * @return map */ private Map<String, String> getGlobalParamsMap() { Map<String, String> globalParamsMap = new HashMap<>(16); // global params string String globalParamsStr = taskExecutionContext.getGlobalParams(); if (globalParamsStr != null) { List<Property> globalParamsList = JSONUtils.toList(globalParamsStr, Property.class); globalParamsMap.putAll(globalParamsList.stream().collect(Collectors.toMap(Property::getProp, Property::getValue))); } return globalParamsMap; } /** * kill task */ public void kill() { if (task != null) { try { task.cancelApplication(true); TaskInstance taskInstance = processService.findTaskInstanceById(taskExecutionContext.getTaskInstanceId()); if (taskInstance != null) { ProcessUtils.killYarnJob(taskExecutionContext); } } catch (Exception e) { logger.error(e.getMessage(), e); } } } /** * download resource file * * @param execLocalPath execLocalPath * @param projectRes projectRes * @param logger logger */ private void downloadResource(String execLocalPath, Map<String, String> projectRes, Logger logger) { if (MapUtils.isEmpty(projectRes)) { return; } Set<Map.Entry<String, String>> resEntries = projectRes.entrySet(); for (Map.Entry<String, String> resource : resEntries) { String fullName = resource.getKey(); String tenantCode = resource.getValue(); File resFile = new File(execLocalPath, fullName); if (!resFile.exists()) { try { // query the tenant code of the resource according to the name of the resource String resHdfsPath = HadoopUtils.getHdfsResourceFileName(tenantCode, fullName); logger.info("get resource file from hdfs :{}", resHdfsPath); HadoopUtils.getInstance().copyHdfsToLocal(resHdfsPath, execLocalPath + File.separator + fullName, false, true); } catch (Exception e) { logger.error(e.getMessage(), e); throw new RuntimeException(e.getMessage()); } } else { logger.info("file : {} exists ", resFile.getName()); } } } /** * send an ack to change the status of the task. */ private void changeTaskExecutionStatusToRunning() { taskExecutionContext.setCurrentExecutionStatus(ExecutionStatus.RUNNING_EXECUTION); Command ackCommand = buildAckCommand().convert2Command(); try { RetryerUtils.retryCall(() -> { taskCallbackService.sendAck(taskExecutionContext.getTaskInstanceId(), ackCommand); return Boolean.TRUE; }); } catch (ExecutionException | RetryException e) { logger.error(e.getMessage(), e); } } /** * build ack command. 
* * @return TaskExecuteAckCommand */ private TaskExecuteAckCommand buildAckCommand() { TaskExecuteAckCommand ackCommand = new TaskExecuteAckCommand(); ackCommand.setTaskInstanceId(taskExecutionContext.getTaskInstanceId()); ackCommand.setStatus(taskExecutionContext.getCurrentExecutionStatus().getCode()); ackCommand.setStartTime(taskExecutionContext.getStartTime()); ackCommand.setLogPath(taskExecutionContext.getLogPath()); ackCommand.setHost(taskExecutionContext.getHost()); if (TaskType.SQL.getDesc().equalsIgnoreCase(taskExecutionContext.getTaskType()) || TaskType.PROCEDURE.getDesc().equalsIgnoreCase(taskExecutionContext.getTaskType())) { ackCommand.setExecutePath(null); } else { ackCommand.setExecutePath(taskExecutionContext.getExecutePath()); } return ackCommand; } /** * get current TaskExecutionContext * * @return TaskExecutionContext */ public TaskExecutionContext getTaskExecutionContext() { return this.taskExecutionContext; } @Override public long getDelay(TimeUnit unit) { return unit.convert(DateUtils.getRemainTime(taskExecutionContext.getFirstSubmitTime(), taskExecutionContext.getDelayTime() * 60L), TimeUnit.SECONDS); } @Override public int compareTo(Delayed o) { if (o == null) { return 1; } return Long.compare(this.getDelay(TimeUnit.MILLISECONDS), o.getDelay(TimeUnit.MILLISECONDS)); } private void preBuildBusinessParams() { Map<String, Property> paramsMap = new HashMap<>(); // replace variable TIME with $[YYYYmmddd...] in shell file when history run job and batch complement job if (taskExecutionContext.getScheduleTime() != null) { Date date = taskExecutionContext.getScheduleTime(); if (CommandType.COMPLEMENT_DATA.getCode() == taskExecutionContext.getCmdTypeIfComplement()) { date = DateUtils.add(taskExecutionContext.getScheduleTime(), DAY_OF_MONTH, 1); } String dateTime = DateUtils.format(date, Constants.PARAMETER_FORMAT_TIME); Property p = new Property(); p.setValue(dateTime); p.setProp(Constants.PARAMETER_DATETIME); paramsMap.put(Constants.PARAMETER_DATETIME, p); } taskExecutionContext.setParamsMap(paramsMap); } }
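To make the issue's proposal concrete, here is a minimal sketch of kill() with the redundant DB round trip removed. In the code above, the taskInstance fetched via processService.findTaskInstanceById is used only for a null check before killYarnJob(taskExecutionContext), so dropping the query leaves the kill behavior intact. This illustrates the idea and is not necessarily the exact patch merged in the linked PR:

/**
 * Kill the task without the redundant DB round trip: the
 * TaskExecutionContext already carries everything killYarnJob needs.
 */
public void kill() {
    if (task != null) {
        try {
            task.cancelApplication(true);
            // No findTaskInstanceById lookup; the context identifies the task.
            ProcessUtils.killYarnJob(taskExecutionContext);
        } catch (Exception e) {
            logger.error(e.getMessage(), e);
        }
    }
}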
closed
apache/dolphinscheduler
https://github.com/apache/dolphinscheduler
6,438
[Improvement][Worker] remove meaningless DB query
### Search before asking - [X] I had searched in the [issues](https://github.com/apache/dolphinscheduler/issues?q=is%3Aissue) and found no similar feature requirement. ### Description Remove a meaningless DB query in org.apache.dolphinscheduler.server.worker.runner.TaskExecuteThread. During task kill, the worker queries the DB to fetch the task instance object, but that check serves no purpose: the relevant context is already known locally, so there is no need to query the DB. ### Use case _No response_ ### Related issues _No response_ ### Are you willing to submit a PR? - [X] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://www.apache.org/foundation/policies/conduct)
https://github.com/apache/dolphinscheduler/issues/6438
https://github.com/apache/dolphinscheduler/pull/6440
d284e44d52f75bd6c5f5756d9b7ae4a5af24fadc
ea2a8d26a0d42683309b8bda9ef24f4daaab266b
"2021-10-01T17:01:38Z"
java
"2021-10-02T05:09:00Z"
dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/queue/entity/TaskExecutionContext.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.dolphinscheduler.service.queue.entity; import org.apache.dolphinscheduler.common.enums.ExecutionStatus; import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy; import org.apache.dolphinscheduler.common.process.Property; import org.apache.dolphinscheduler.common.utils.JSONUtils; import org.apache.dolphinscheduler.remote.command.Command; import org.apache.dolphinscheduler.remote.command.TaskExecuteRequestCommand; import org.apache.dolphinscheduler.spi.task.request.DataxTaskExecutionContext; import org.apache.dolphinscheduler.spi.task.request.ProcedureTaskExecutionContext; import org.apache.dolphinscheduler.spi.task.request.SQLTaskExecutionContext; import org.apache.dolphinscheduler.spi.task.request.SqoopTaskExecutionContext; import java.io.Serializable; import java.util.Date; import java.util.Map; import com.fasterxml.jackson.annotation.JsonFormat; /** * master/worker task transport */ public class TaskExecutionContext implements Serializable { /** * task id */ private int taskInstanceId; /** * task name */ private String taskName; /** * task first submit time. */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date firstSubmitTime; /** * task start time */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date startTime; /** * task type */ private String taskType; /** * host */ private String host; /** * task execute path */ private String executePath; /** * log path */ private String logPath; /** * task json */ private String taskJson; /** * processId */ private int processId; /** * processCode */ private Long processDefineCode; /** * processVersion */ private int processDefineVersion; /** * appIds */ private String appIds; /** * process instance id */ private int processInstanceId; /** * process instance schedule time */ @JsonFormat(pattern = "yyyy-MM-dd HH:mm:ss", timezone = "GMT+8") private Date scheduleTime; /** * process instance global parameters */ private String globalParams; /** * execute user id */ private int executorId; /** * command type if complement */ private int cmdTypeIfComplement; /** * tenant code */ private String tenantCode; /** * task queue */ private String queue; /** * project code */ private long projectCode; /** * taskParams */ private String taskParams; /** * envFile */ private String envFile; /** * environmentConfig */ private String environmentConfig; /** * definedParams */ private Map<String, String> definedParams; /** * task AppId */ private String taskAppId; /** * task timeout strategy */ private TaskTimeoutStrategy taskTimeoutStrategy; /** * task timeout */ private int taskTimeout; /** * worker group */ private String workerGroup; /** * delay execution time. 
*/ private int delayTime; /** * current execution status */ private ExecutionStatus currentExecutionStatus; /** * resources full name and tenant code */ private Map<String, String> resources; /** * sql TaskExecutionContext */ private SQLTaskExecutionContext sqlTaskExecutionContext; /** * datax TaskExecutionContext */ private DataxTaskExecutionContext dataxTaskExecutionContext; /** * dependence TaskExecutionContext */ private DependenceTaskExecutionContext dependenceTaskExecutionContext; /** * sqoop TaskExecutionContext */ private SqoopTaskExecutionContext sqoopTaskExecutionContext; /** * taskInstance varPool */ private String varPool; /** * business param */ private Map<String, Property> paramsMap; public Map<String, Property> getParamsMap() { return paramsMap; } public void setParamsMap(Map<String, Property> paramsMap) { this.paramsMap = paramsMap; } /** * procedure TaskExecutionContext */ private ProcedureTaskExecutionContext procedureTaskExecutionContext; public int getTaskInstanceId() { return taskInstanceId; } public void setTaskInstanceId(int taskInstanceId) { this.taskInstanceId = taskInstanceId; } public String getTaskName() { return taskName; } public void setTaskName(String taskName) { this.taskName = taskName; } public Date getFirstSubmitTime() { return firstSubmitTime; } public void setFirstSubmitTime(Date firstSubmitTime) { this.firstSubmitTime = firstSubmitTime; } public Date getStartTime() { return startTime; } public void setStartTime(Date startTime) { this.startTime = startTime; } public String getTaskType() { return taskType; } public void setTaskType(String taskType) { this.taskType = taskType; } public String getHost() { return host; } public void setHost(String host) { this.host = host; } public String getExecutePath() { return executePath; } public void setExecutePath(String executePath) { this.executePath = executePath; } public String getLogPath() { return logPath; } public void setLogPath(String logPath) { this.logPath = logPath; } public String getTaskJson() { return taskJson; } public void setTaskJson(String taskJson) { this.taskJson = taskJson; } public int getProcessId() { return processId; } public void setProcessId(int processId) { this.processId = processId; } public Long getProcessDefineCode() { return processDefineCode; } public void setProcessDefineCode(Long processDefineCode) { this.processDefineCode = processDefineCode; } public int getProcessDefineVersion() { return processDefineVersion; } public void setProcessDefineVersion(int processDefineVersion) { this.processDefineVersion = processDefineVersion; } public String getAppIds() { return appIds; } public void setAppIds(String appIds) { this.appIds = appIds; } public int getProcessInstanceId() { return processInstanceId; } public void setProcessInstanceId(int processInstanceId) { this.processInstanceId = processInstanceId; } public Date getScheduleTime() { return scheduleTime; } public void setScheduleTime(Date scheduleTime) { this.scheduleTime = scheduleTime; } public String getGlobalParams() { return globalParams; } public void setGlobalParams(String globalParams) { this.globalParams = globalParams; } public int getExecutorId() { return executorId; } public void setExecutorId(int executorId) { this.executorId = executorId; } public int getCmdTypeIfComplement() { return cmdTypeIfComplement; } public void setCmdTypeIfComplement(int cmdTypeIfComplement) { this.cmdTypeIfComplement = cmdTypeIfComplement; } public String getTenantCode() { return tenantCode; } public void setTenantCode(String tenantCode) { 
this.tenantCode = tenantCode; } public String getQueue() { return queue; } public void setQueue(String queue) { this.queue = queue; } public long getProjectCode() { return projectCode; } public void setProjectCode(long projectCode) { this.projectCode = projectCode; } public String getTaskParams() { return taskParams; } public void setTaskParams(String taskParams) { this.taskParams = taskParams; } public String getEnvFile() { return envFile; } public void setEnvFile(String envFile) { this.envFile = envFile; } public String getEnvironmentConfig() { return environmentConfig; } public void setEnvironmentConfig(String config) { this.environmentConfig = config; } public Map<String, String> getDefinedParams() { return definedParams; } public void setDefinedParams(Map<String, String> definedParams) { this.definedParams = definedParams; } public String getTaskAppId() { return taskAppId; } public void setTaskAppId(String taskAppId) { this.taskAppId = taskAppId; } public TaskTimeoutStrategy getTaskTimeoutStrategy() { return taskTimeoutStrategy; } public void setTaskTimeoutStrategy(TaskTimeoutStrategy taskTimeoutStrategy) { this.taskTimeoutStrategy = taskTimeoutStrategy; } public int getTaskTimeout() { return taskTimeout; } public void setTaskTimeout(int taskTimeout) { this.taskTimeout = taskTimeout; } public String getWorkerGroup() { return workerGroup; } public void setWorkerGroup(String workerGroup) { this.workerGroup = workerGroup; } public int getDelayTime() { return delayTime; } public void setDelayTime(int delayTime) { this.delayTime = delayTime; } public ExecutionStatus getCurrentExecutionStatus() { return currentExecutionStatus; } public void setCurrentExecutionStatus(ExecutionStatus currentExecutionStatus) { this.currentExecutionStatus = currentExecutionStatus; } public SQLTaskExecutionContext getSqlTaskExecutionContext() { return sqlTaskExecutionContext; } public void setSqlTaskExecutionContext(SQLTaskExecutionContext sqlTaskExecutionContext) { this.sqlTaskExecutionContext = sqlTaskExecutionContext; } public DataxTaskExecutionContext getDataxTaskExecutionContext() { return dataxTaskExecutionContext; } public void setDataxTaskExecutionContext(DataxTaskExecutionContext dataxTaskExecutionContext) { this.dataxTaskExecutionContext = dataxTaskExecutionContext; } public ProcedureTaskExecutionContext getProcedureTaskExecutionContext() { return procedureTaskExecutionContext; } public void setProcedureTaskExecutionContext(ProcedureTaskExecutionContext procedureTaskExecutionContext) { this.procedureTaskExecutionContext = procedureTaskExecutionContext; } public Command toCommand() { TaskExecuteRequestCommand requestCommand = new TaskExecuteRequestCommand(); requestCommand.setTaskExecutionContext(JSONUtils.toJsonString(this)); return requestCommand.convert2Command(); } public DependenceTaskExecutionContext getDependenceTaskExecutionContext() { return dependenceTaskExecutionContext; } public void setDependenceTaskExecutionContext(DependenceTaskExecutionContext dependenceTaskExecutionContext) { this.dependenceTaskExecutionContext = dependenceTaskExecutionContext; } public Map<String, String> getResources() { return resources; } public void setResources(Map<String, String> resources) { this.resources = resources; } public SqoopTaskExecutionContext getSqoopTaskExecutionContext() { return sqoopTaskExecutionContext; } public void setSqoopTaskExecutionContext(SqoopTaskExecutionContext sqoopTaskExecutionContext) { this.sqoopTaskExecutionContext = sqoopTaskExecutionContext; } @Override public String toString() { 
return "TaskExecutionContext{" + "taskInstanceId=" + taskInstanceId + ", taskName='" + taskName + '\'' + ", currentExecutionStatus=" + currentExecutionStatus + ", firstSubmitTime=" + firstSubmitTime + ", startTime=" + startTime + ", taskType='" + taskType + '\'' + ", host='" + host + '\'' + ", executePath='" + executePath + '\'' + ", logPath='" + logPath + '\'' + ", taskJson='" + taskJson + '\'' + ", processId=" + processId + ", processDefineCode=" + processDefineCode + ", processDefineVersion=" + processDefineVersion + ", appIds='" + appIds + '\'' + ", processInstanceId=" + processInstanceId + ", scheduleTime=" + scheduleTime + ", globalParams='" + globalParams + '\'' + ", executorId=" + executorId + ", cmdTypeIfComplement=" + cmdTypeIfComplement + ", tenantCode='" + tenantCode + '\'' + ", queue='" + queue + '\'' + ", projectCode=" + projectCode + ", taskParams='" + taskParams + '\'' + ", envFile='" + envFile + '\'' + ", definedParams=" + definedParams + ", taskAppId='" + taskAppId + '\'' + ", taskTimeoutStrategy=" + taskTimeoutStrategy + ", taskTimeout=" + taskTimeout + ", workerGroup='" + workerGroup + '\'' + ", environmentConfig='" + environmentConfig + '\'' + ", delayTime=" + delayTime + ", resources=" + resources + ", sqlTaskExecutionContext=" + sqlTaskExecutionContext + ", dataxTaskExecutionContext=" + dataxTaskExecutionContext + ", dependenceTaskExecutionContext=" + dependenceTaskExecutionContext + ", sqoopTaskExecutionContext=" + sqoopTaskExecutionContext + ", procedureTaskExecutionContext=" + procedureTaskExecutionContext + '}'; } public String getVarPool() { return varPool; } public void setVarPool(String varPool) { this.varPool = varPool; } }