release: new version 2.0.9 (#902)

diff --git a/docs/2.0.9/configs/docs2-0-9.js b/docs/2.0.9/configs/docs2-0-9.js
new file mode 100644
index 0000000..495182c
--- /dev/null
+++ b/docs/2.0.9/configs/docs2-0-9.js
@@ -0,0 +1,890 @@
+export default {
+  'en-us': {
+    sidemenu: [
+      {
+        title: 'About',
+        children: [
+          {
+            title: 'Introduction',
+            link: '/en-us/docs/2.0.9/user_doc/About_DolphinScheduler/About_DolphinScheduler.html',
+          },
+          {
+            title: 'Hardware Environment',
+            link: '/en-us/docs/2.0.9/user_doc/guide/installation/hardware.html',
+          },
+          {
+            title: 'Glossary',
+            link: '/en-us/docs/2.0.9/user_doc/architecture/designplus.html',
+          },
+        ],
+      },
+      {
+        title: 'Quick Start',
+        children: [
+          {
+            title: 'Quick Start',
+            link: '/en-us/docs/2.0.9/user_doc/guide/quick-start.html',
+          },
+          {
+            title: 'Docker Deployment',
+            link: '/en-us/docs/2.0.9/user_doc/guide/installation/docker.html',
+          },
+        ],
+      },
+      {
+        title: 'Installation',
+        children: [
+          {
+            title: 'Standalone Deployment',
+            link: '/en-us/docs/2.0.9/user_doc/guide/installation/standalone.html',
+          },
+          {
+            title: 'Pseudo Cluster Deployment',
+            link: '/en-us/docs/2.0.9/user_doc/guide/installation/pseudo-cluster.html',
+          },
+          {
+            title: 'Cluster Deployment',
+            link: '/en-us/docs/2.0.9/user_doc/guide/installation/cluster.html',
+          },
+          {
+            title: 'Kubernetes Deployment',
+            link: '/en-us/docs/2.0.9/user_doc/guide/installation/kubernetes.html',
+          },
+        ],
+      },
+      {
+        title: 'Introduction to Functions',
+        children: [
+          {
+            title: 'Workflow Overview',
+            link: '/en-us/docs/2.0.9/user_doc/guide/homepage.html',
+          },
+          {
+            title: 'Project',
+            children: [
+              {
+                title: 'Project List',
+                link: '/en-us/docs/2.0.9/user_doc/guide/project/project-list.html',
+              },
+              {
+                title: 'Workflow Definition',
+                link: '/en-us/docs/2.0.9/user_doc/guide/project/workflow-definition.html',
+              },
+              {
+                title: 'Workflow Instance',
+                link: '/en-us/docs/2.0.9/user_doc/guide/project/workflow-instance.html',
+              },
+              {
+                title: 'Task Instance',
+                link: '/en-us/docs/2.0.9/user_doc/guide/project/task-instance.html',
+              },
+            ]
+          },
+          {
+            title: 'Task',
+            children: [
+              {
+                title: 'Shell',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/shell.html',
+              },
+              {
+                title: 'SubProcess',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/sub-process.html',
+              },
+              {
+                title: 'Dependent',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/dependent.html',
+              },
+              {
+                title: 'Stored Procedure',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/stored-procedure.html',
+              },
+              {
+                title: 'SQL',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/sql.html',
+              },
+              {
+                title: 'Spark',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/spark.html',
+              },
+              {
+                title: 'MapReduce',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/map-reduce.html',
+              },
+              {
+                title: 'Python',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/python.html',
+              },
+              {
+                title: 'Flink',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/flink.html',
+              },
+              {
+                title: 'HTTP',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/http.html',
+              },
+              {
+                title: 'DataX',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/datax.html',
+              },
+              {
+                title: 'Pigeon',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/pigeon.html',
+              },
+              {
+                title: 'Conditions',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/conditions.html',
+              },
+              {
+                title: 'Switch',
+                link: '/en-us/docs/2.0.9/user_doc/guide/task/switch.html',
+              },
+            ],
+          },
+          {
+            title: 'Parameter',
+            children: [
+              {
+                title: 'Built-in Parameter',
+                link: '/en-us/docs/2.0.9/user_doc/guide/parameter/built-in.html',
+              },
+              {
+                title: 'Global Parameter',
+                link: '/en-us/docs/2.0.9/user_doc/guide/parameter/global.html',
+              },
+              {
+                title: 'Local Parameter',
+                link: '/en-us/docs/2.0.9/user_doc/guide/parameter/local.html',
+              },
+              {
+                title: 'Parameter Context',
+                link: '/en-us/docs/2.0.9/user_doc/guide/parameter/context.html',
+              },
+              {
+                title: 'Parameter Priority',
+                link: '/en-us/docs/2.0.9/user_doc/guide/parameter/priority.html',
+              },
+            ],
+          },
+          {
+            title: 'Data Source',
+            children: [
+              {
+                title: 'Introduction',
+                link: '/en-us/docs/2.0.9/user_doc/guide/datasource/introduction.html',
+              },
+              {
+                title: 'MySQL',
+                link: '/en-us/docs/2.0.9/user_doc/guide/datasource/mysql.html',
+              },
+              {
+                title: 'PostgreSQL',
+                link: '/en-us/docs/2.0.9/user_doc/guide/datasource/postgresql.html',
+              },
+              {
+                title: 'HIVE',
+                link: '/en-us/docs/2.0.9/user_doc/guide/datasource/hive.html',
+              },
+              {
+                title: 'Spark',
+                link: '/en-us/docs/2.0.9/user_doc/guide/datasource/spark.html',
+              },
+            ],
+          },
+          {
+            title: 'Alert',
+            children: [
+              {
+                title: 'Alert Component User Guide',
+                link: '/en-us/docs/2.0.9/user_doc/guide/alert/alert_plugin_user_guide.html',
+              },
+              {
+                title: 'Enterprise Wechat',
+                link: '/en-us/docs/2.0.9/user_doc/guide/alert/enterprise-wechat.html',
+              },
+              {
+                title: 'Ding Talk',
+                link: '/en-us/docs/2.0.9/user_doc/guide/alert/dingtalk.html',
+              },
+            ],
+          },
+          {
+            title: 'Resource',
+            link: '/en-us/docs/2.0.9/user_doc/guide/resource.html',
+          },
+          {
+            title: 'Monitor',
+            link: '/en-us/docs/2.0.9/user_doc/guide/monitor.html',
+          },
+          {
+            title: 'Security',
+            link: '/en-us/docs/2.0.9/user_doc/guide/security.html',
+          },
+          {
+            title: 'Flink',
+            link: '/en-us/docs/2.0.9/user_doc/guide/flink-call.html',
+          },
+          {
+            title: 'Upgrade',
+            link: '/en-us/docs/2.0.9/user_doc/guide/upgrade.html',
+          },
+          {
+            title: 'Expansion and Reduction',
+            link: '/en-us/docs/2.0.9/user_doc/guide/expansion-reduction.html',
+          },
+        ],
+      },
+      {
+        title: 'Advanced Guide',
+        children: [
+          {
+            title: 'Architecture Design',
+            link: '/en-us/docs/2.0.9/user_doc/architecture/design.html',
+          },
+          {
+            title: 'Metadata',
+            link: '/en-us/docs/2.0.9/user_doc/architecture/metadata.html',
+          },
+          {
+            title: 'Configuration File',
+            link: '/en-us/docs/2.0.9/user_doc/architecture/configuration.html',
+          },
+          {
+            title: 'Task Structure',
+            link: '/en-us/docs/2.0.9/user_doc/architecture/task-structure.html',
+          },
+          {
+            title: 'Load Balance',
+            link: '/en-us/docs/2.0.9/user_doc/architecture/load-balance.html',
+          },
+          {
+            title: 'Cache',
+            link: '/en-us/docs/2.0.9/user_doc/architecture/cache.html',
+          },
+        ],
+      },
+      {
+        title: 'Observability',
+        children: [
+          {
+            title: 'SkyWalking-Agent',
+            link: '/en-us/docs/2.0.9/user_doc/guide/observability/skywalking-agent.html',
+          },
+        ]
+      },
+      {
+        title: 'API',
+        children: [
+          {
+            title: 'Open API',
+            link: '/en-us/docs/2.0.9/user_doc/guide/open-api.html',
+          },
+          {
+            title: 'PyDolphinScheduler',
+            link: '/python/2.0.9/index.html',
+          },
+        ],
+      },
+      {
+        title: 'Contribution',
+        children: [
+          {
+            title: 'Join',
+            children: [
+              {
+                title: 'Security Report',
+                link: '/en-us/docs/dev/user_doc/contribute/join/security.html',
+              },
+              {
+                title: 'How to Become a Committer',
+                link: '/en-us/docs/dev/user_doc/contribute/join/become-a-committer.html',
+              },
+              {
+                title: 'Subscribe Mailing Lists',
+                link: '/en-us/docs/dev/user_doc/contribute/join/subscribe.html',
+              },
+              {
+                title: 'Participate in Contributing',
+                link: '/en-us/docs/dev/user_doc/contribute/join/contribute.html',
+              },
+              {
+                title: 'Code of Conduct',
+                link: '/en-us/docs/dev/user_doc/contribute/join/code-conduct.html',
+              },
+              {
+                title: 'Review Issue or Pull Requests',
+                link: '/en-us/docs/dev/user_doc/contribute/join/review.html',
+              },
+              {
+                title: 'Submit Code',
+                link: '/en-us/docs/dev/user_doc/contribute/join/submit-code.html',
+              },
+              {
+                title: 'License Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/DS-License.html',
+              },
+              {
+                title: 'Document Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/document.html',
+              },
+              {
+                title: 'Issue Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/issue.html',
+              },
+              {
+                title: 'Pull Request Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/pull-request.html',
+              },
+              {
+                title: 'Commit Message Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/commit-message.html',
+              },
+              {
+                title: 'Micro BenchMark Notice',
+                link: '/en-us/docs/dev/user_doc/contribute/join/microbench.html',
+              },
+              {
+                title: 'Unit Test Writing Guide',
+                link: '/en-us/docs/dev/user_doc/contribute/join/unit-test.html',
+              },
+            ],
+          },
+          {
+            title: 'Development Environment Setup',
+            link: '/en-us/docs/dev/user_doc/contribute/development-environment-setup.html',
+          },
+          {
+            title: 'Design Document',
+            children: [
+              // TODO: multiple levels are not supported for now
+              // {
+              // title: 'SPI',
+              // children: [
+              {
+                title: 'Architecture Design',
+                link: '/en-us/docs/dev/user_doc/contribute/architecture-design.html',
+              },
+              {
+                title: 'Alert SPI',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/spi/alert.html',
+              },
+              {
+                title: 'Registry SPI',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/spi/registry.html',
+              },
+              {
+                title: 'Task SPI',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/spi/task.html',
+              },
+              {
+                title: 'Datasource SPI',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/spi/datasource.html',
+              },
+              {
+                title: 'Mechanism Design',
+                link: '/en-us/docs/dev/user_doc/contribute/backend/mechanism/overview.html',
+              },
+            ],
+          },
+          {
+            title: 'Guidelines',
+            children: [
+              {
+                title: 'Frontend Development',
+                link: '/en-us/docs/dev/user_doc/contribute/frontend-development.html',
+              },
+              {
+                title: 'API Standard',
+                link: '/en-us/docs/dev/user_doc/contribute/api-standard.html',
+              },
+            ],
+          },
+          {
+            title: 'Release Guide',
+            children: [
+              {
+                title: 'Release Preparation',
+                link: '/en-us/docs/dev/user_doc/contribute/release/release-prepare.html',
+              },
+              {
+                title: 'Release Guide',
+                link: '/en-us/docs/dev/user_doc/contribute/release/release.html',
+              },
+              {
+                title: 'Release Post',
+                link: '/en-us/docs/dev/user_doc/contribute/release/release-post.html',
+              },
+            ],
+          },
+          {
+            title: 'Questions & Communications',
+            link: '/en-us/docs/dev/user_doc/contribute/have-questions.html',
+          },
+        ],
+      },
+      {
+        title: 'FAQ',
+        children: [
+          {
+            title: 'FAQ',
+            link: '/en-us/docs/release/faq.html',
+          },
+        ],
+      },
+      {
+        title: 'Older Versions',
+        children: [
+          {
+            title: 'Older Versions',
+            link: '/en-us/docs/release/history-versions.html',
+          },
+        ],
+      },
+    ],
+    barText: 'Documentation',
+  },
+  'zh-cn': {
+    sidemenu: [
+      {
+        title: '关于Apache DolphinScheduler',
+        children: [
+          {
+            title: '简介',
+            link: '/zh-cn/docs/2.0.9/user_doc/About_DolphinScheduler/About_DolphinScheduler.html',
+          },
+          {
+            title: '建议配置',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/installation/hardware.html',
+          },
+          {
+            title: '名称解释',
+            link: '/zh-cn/docs/2.0.9/user_doc/architecture/designplus.html',
+          },          
+        ],
+      },
+      {
+        title: '快速上手',
+        children: [
+          {
+            title: '快速上手',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/quick-start.html',
+          },
+          {
+            title: 'Docker部署(Docker)',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/installation/docker.html',
+          },
+        ],
+      }, 
+      {
+        title: '部署指南',
+        children: [
+          {
+            title: '单机部署(Standalone)',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/installation/standalone.html',
+          },
+          {
+            title: '伪集群部署(Pseudo-Cluster)',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/installation/pseudo-cluster.html',
+          },
+          {
+            title: '集群部署(Cluster)',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/installation/cluster.html',
+          },
+          {
+            title: 'Kubernetes部署(Kubernetes)',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/installation/kubernetes.html',
+          },
+        ],
+      }, 
+      {          
+        title: '功能介绍',
+        children: [
+          {
+            title: '指标总览',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/homepage.html',
+          },
+          {
+            title: '项目管理',
+            children: [
+              {
+                title: '项目列表',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/project/project-list.html',
+              },
+              {
+                title: '工作流定义',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/project/workflow-definition.html',
+              },
+              {
+                title: '工作流实例',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/project/workflow-instance.html',
+              },
+              {
+                title: '任务实例',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/project/task-instance.html',
+              },
+            ]
+          },
+          {
+            title: '任务类型',
+            children: [
+              {
+                title: 'Shell',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/shell.html',
+              },
+              {
+                title: 'SubProcess',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/sub-process.html',
+              },
+              {
+                title: 'Dependent',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/dependent.html',
+              },
+              {
+                title: 'Stored Procedure',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/stored-procedure.html',
+              },
+              {
+                title: 'SQL',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/sql.html',
+              },
+              {
+                title: 'Spark',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/spark.html',
+              },
+              {
+                title: 'MapReduce',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/map-reduce.html',
+              },
+              {
+                title: 'Python',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/python.html',
+              },
+              {
+                title: 'Flink',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/flink.html',
+              },
+              {
+                title: 'HTTP',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/http.html',
+              },
+              {
+                title: 'DataX',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/datax.html',
+              },
+              {
+                title: 'Pigeon',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/pigeon.html',
+              },
+              {
+                title: 'Conditions',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/conditions.html',
+              },
+              {
+                title: 'Switch',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/task/switch.html',
+              },
+            ],
+          },
+          {
+            title: '参数',
+            children: [
+              {
+                title: '内置参数',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/parameter/built-in.html',
+              },
+              {
+                title: '全局参数',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/parameter/global.html',
+              },
+              {
+                title: '本地参数',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/parameter/local.html',
+              },
+              {
+                title: '参数传递',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/parameter/context.html',
+              },
+              {
+                title: '参数优先级',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/parameter/priority.html',
+              },
+            ],
+          },
+          {
+            title: '数据源中心',
+            children: [
+              {
+                title: '简介',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/datasource/introduction.html',
+              },
+              {
+                title: 'MySQL',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/datasource/mysql.html',
+              },
+              {
+                title: 'PostgreSQL',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/datasource/postgresql.html',
+              },
+              {
+                title: 'HIVE',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/datasource/hive.html',
+              },
+              {
+                title: 'Spark',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/datasource/spark.html',
+              },
+            ],
+          },
+          {
+            title: '告警',
+            children: [
+              {
+                title: '告警组件向导',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/alert/alert_plugin_user_guide.html',
+              },
+              {
+                title: '企业微信',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/alert/enterprise-wechat.html',
+              },
+              {
+                title: '钉钉',
+                link: '/zh-cn/docs/2.0.9/user_doc/guide/alert/dingtalk.html',
+              },
+            ],
+          },
+          {
+            title: '资源中心',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/resource.html',
+          },
+          {
+            title: '监控中心',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/monitor.html',
+          },
+          {
+            title: '安全中心',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/security.html',
+          },
+          {
+            title: 'Flink调用',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/flink-call.html',
+          },
+          {
+            title: '升级',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/upgrade.html',
+          },
+          {
+            title: '扩/缩容',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/expansion-reduction.html',
+          },
+        ],
+      },
+      {
+        title: '高级指南',
+        children: [
+          {
+            title: '架构设计',
+            link: '/zh-cn/docs/2.0.9/user_doc/architecture/design.html',
+          },
+          {
+            title: '元数据文档',
+            link: '/zh-cn/docs/2.0.9/user_doc/architecture/metadata.html',
+          },
+          {
+            title: '配置文件',
+            link: '/zh-cn/docs/2.0.9/user_doc/architecture/configuration.html',
+          },
+          {
+            title: '任务结构',
+            link: '/zh-cn/docs/2.0.9/user_doc/architecture/task-structure.html',
+          },
+          {
+            title: '负载均衡',
+            link: '/zh-cn/docs/2.0.9/user_doc/architecture/load-balance.html',
+          },
+          {
+            title: '缓存',
+            link: '/zh-cn/docs/2.0.9/user_doc/architecture/cache.html',
+          },
+        ],
+      },
+      {
+        title: '可观测性',
+        children: [
+          {
+            title: 'SkyWalking-Agent',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/observability/skywalking-agent.html',
+          },
+        ]
+      },
+      {
+        title: 'API',
+        children: [
+          {
+            title: 'API调用',
+            link: '/zh-cn/docs/2.0.9/user_doc/guide/open-api.html',
+          },
+          {
+            title: 'PyDolphinScheduler',
+            link: '/python/2.0.9/index.html',
+          },
+        ],
+      },
+      {
+        title: '贡献指南',
+        children: [
+          {
+            title: '如何参与',
+            children: [
+              {
+                title: '报告安全问题',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/security.html',
+              },
+              {
+                title: '如何成为 Committer',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/become-a-committer.html',
+              },
+              {
+                title: '订阅/取消订阅邮件列表',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/subscribe.html',
+              },
+              {
+                title: '参与贡献',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/contribute.html',
+              },
+              {
+                title: '行为准则',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/code-conduct.html',
+              },
+              {
+                title: 'Review Issue or Pull Requests',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/review.html',
+              },
+              {
+                title: '提交代码',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/submit-code.html',
+              },
+              {
+                title: 'License须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/DS-License.html',
+              },
+              {
+                title: '文档须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/document.html',
+              },
+              {
+                title: 'Issue须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/issue.html',
+              },
+              {
+                title: 'Pull Request须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/pull-request.html',
+              },
+              {
+                title: 'Commit Message须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/commit-message.html',
+              },
+              {
+                title: '微基准测试须知',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/microbench.html',
+              },
+              {
+                title: '单元测试编写指南',
+                link: '/zh-cn/docs/dev/user_doc/contribute/join/unit-test.html',
+              },
+            ],
+          },
+          {
+            title: '环境搭建',
+            link: '/zh-cn/docs/dev/user_doc/contribute/development-environment-setup.html',
+          },
+          {
+            title: '设计文档',
+            children: [
+              // TODO: multiple levels are not supported for now
+              // {
+              // title: 'SPI',
+              // children: [
+              {
+                title: '架构设计',
+                link: '/zh-cn/docs/dev/user_doc/contribute/architecture-design.html',
+              },
+              {
+                title: 'Alert SPI',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/spi/alert.html',
+              },
+              {
+                title: 'Registry SPI',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/spi/registry.html',
+              },
+              {
+                title: 'Task SPI',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/spi/task.html',
+              },
+              {
+                title: 'Datasource SPI',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/spi/datasource.html',
+              },
+              {
+                title: '组件设计',
+                link: '/zh-cn/docs/dev/user_doc/contribute/backend/mechanism/overview.html',
+              },
+            ],
+          },
+          {
+            title: '规范',
+            children: [
+              {
+                title: '前端开发',
+                link: '/zh-cn/docs/dev/user_doc/contribute/frontend-development.html',
+              },
+              {
+                title: 'API规范',
+                link: '/zh-cn/docs/dev/user_doc/contribute/api-standard.html',
+              },
+            ],
+          },
+          {
+            title: '发版指南',
+            children: [
+              {
+                title: '发版准备',
+                link: '/zh-cn/docs/dev/user_doc/contribute/release/release-prepare.html',
+              },
+              {
+                title: '发版指南',
+                link: '/zh-cn/docs/dev/user_doc/contribute/release/release.html',
+              },
+              {
+                title: '发版后续',
+                link: '/zh-cn/docs/dev/user_doc/contribute/release/release-post.html',
+              },
+            ],
+          },
+          {
+            title: '问题与交流',
+            link: '/zh-cn/docs/dev/user_doc/contribute/have-questions.html',
+          },
+        ],
+      },
+      {
+        title: 'FAQ',
+        children: [
+          {
+            title: 'FAQ',
+            link: '/zh-cn/docs/release/faq.html',
+          },
+        ],
+      },
+      {
+        title: '历史版本',
+        children: [
+          {
+            title: '历史版本',
+            link: '/zh-cn/docs/release/history-versions.html',
+          },
+        ],
+      },
+    ],
+    barText: '文档',
+  },
+};
diff --git a/docs/2.0.9/docs/en/About_DolphinScheduler/About_DolphinScheduler.md b/docs/2.0.9/docs/en/About_DolphinScheduler/About_DolphinScheduler.md
new file mode 100644
index 0000000..5f1cb64
--- /dev/null
+++ b/docs/2.0.9/docs/en/About_DolphinScheduler/About_DolphinScheduler.md
@@ -0,0 +1,12 @@
+# About DolphinScheduler
+
+Apache DolphinScheduler is a cloud-native visual Big Data workflow scheduler system, committed to “solving complex big-data task dependencies and triggering relationships in data OPS orchestration so that various types of big data tasks can be used out of the box”.
+
+## High Reliability
+- Decentralized multi-master and multi-worker architecture with built-in HA and overload processing
+## User-Friendly
+- All process definition operations are visualized, key information of a process definition is visible at a glance, and one-click deployment is supported
+## Rich Scenarios
+- Supports multi-tenancy and many task types, e.g. spark, flink, hive, mr, shell, python, sub_process
+## High Expansibility
+- Supports custom task types and distributed scheduling, and the overall scheduling capability increases linearly with the scale of the cluster
diff --git a/docs/2.0.9/docs/en/architecture/cache.md b/docs/2.0.9/docs/en/architecture/cache.md
new file mode 100644
index 0000000..6a7359d
--- /dev/null
+++ b/docs/2.0.9/docs/en/architecture/cache.md
@@ -0,0 +1,42 @@
+### Cache
+
+#### Purpose
+
+During the master-server scheduling process, there are a large number of database read operations for business data such as `tenant`, `user`, `processDefinition`, etc. On the one hand this puts a lot of pressure on the DB, and on the other hand it slows down the entire core scheduling process.
+
+Since this business data is read much more often than it is written, a cache module is introduced to reduce the DB read pressure and speed up the core scheduling process.
+
+#### Cache settings
+
+```yaml
+spring:
+  cache:
+    # default disable cache, you can enable by `type: caffeine`
+    type: none
+    cache-names:
+      - tenant
+      - user
+      - processDefinition
+      - processTaskRelation
+      - taskDefinition
+    caffeine:
+      spec: maximumSize=100,expireAfterWrite=300s,recordStats
+```
+
+The cache module uses [spring-cache](https://spring.io/guides/gs/caching/), so you can set the cache config directly in the Spring application.yaml. The cache is disabled by default, and you can enable it with `type: caffeine`.
+
+With the [caffeine](https://github.com/ben-manes/caffeine) config, you can set the cache size, expire time, etc.
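+
+For intuition, the `spec` string above maps directly onto the Caffeine builder API. The following standalone Java sketch (not DolphinScheduler code; the cache key is illustrative) shows the equivalent programmatic configuration:
+
+```java
+import com.github.benmanes.caffeine.cache.Cache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+
+public class CaffeineSpecDemo {
+    public static void main(String[] args) {
+        // Same settings as spec: maximumSize=100,expireAfterWrite=300s,recordStats
+        Cache<String, Object> cache = Caffeine.from("maximumSize=100,expireAfterWrite=300s,recordStats")
+                .build();
+
+        cache.put("tenant-1", new Object());
+        // recordStats enables hit/miss statistics, useful when tuning the cache size
+        System.out.println(cache.stats());
+    }
+}
+```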
+
+#### Cache Read
+
+The cache uses the spring-cache annotation `@Cacheable` and is configured in the mapper layer, for example: `TenantMapper`.
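+
+A minimal sketch of what a cached mapper-layer read can look like is shown below (the interface, entity, and key are illustrative, not the exact DolphinScheduler signatures):
+
+```java
+import org.springframework.cache.annotation.Cacheable;
+
+public interface TenantReaderSketch {
+
+    // Simplified stand-in for the real entity class.
+    class Tenant {
+        public int id;
+        public String tenantCode;
+    }
+
+    // The result lands in the "tenant" cache configured in application.yaml;
+    // repeated reads with the same id are served from Caffeine instead of the DB.
+    @Cacheable(cacheNames = "tenant", key = "#id", unless = "#result == null")
+    Tenant queryById(int id);
+}
+```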
+
+#### Cache Evict
+
+Business data updates come from the api-server, while the cache lives in the master-server. It is therefore necessary to monitor the api-server's data updates (an aspect intercepts `@CacheEvict`) and notify the master-server when a cache eviction is required.
+
+Note that the final cache update behaviour also depends on the expiration strategy configured for caffeine, so please configure it in line with your business needs.
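+
+Conceptually, the interception on the api-server side can be pictured as an aspect like the following (a hand-written sketch assuming Spring AOP; the real DolphinScheduler implementation and its RPC call to the master differ):
+
+```java
+import org.aspectj.lang.JoinPoint;
+import org.aspectj.lang.annotation.AfterReturning;
+import org.aspectj.lang.annotation.Aspect;
+import org.springframework.cache.annotation.CacheEvict;
+import org.springframework.stereotype.Component;
+
+@Aspect
+@Component
+public class CacheEvictNotifySketch {
+
+    // Runs after any @CacheEvict-annotated method in the api-server returns successfully.
+    @AfterReturning(pointcut = "@annotation(cacheEvict)")
+    public void notifyMaster(JoinPoint joinPoint, CacheEvict cacheEvict) {
+        for (String cacheName : cacheEvict.value()) {
+            // Hypothetical helper: tell the master-server (e.g. over netty) to evict
+            // the corresponding entry from its local Caffeine cache.
+            sendEvictCommandToMaster(cacheName, joinPoint.getArgs());
+        }
+    }
+
+    private void sendEvictCommandToMaster(String cacheName, Object[] args) {
+        // placeholder for the actual remote "cache expire" command
+    }
+}
+```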
+
+The sequence diagram is shown in the following figure:
+
+<img src="/img/cache-evict.png" alt="cache-evict" style="zoom: 67%;" />
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/architecture/configuration.md b/docs/2.0.9/docs/en/architecture/configuration.md
new file mode 100644
index 0000000..b40201d
--- /dev/null
+++ b/docs/2.0.9/docs/en/architecture/configuration.md
@@ -0,0 +1,409 @@
+<!-- markdown-link-check-disable -->
+
+# Preface
+This document explains the DolphinScheduler application configurations according to DolphinScheduler-1.3.x versions.
+
+# Directory Structure
+Currently, all the configuration files are under the [conf] directory. Please check the following simplified DolphinScheduler installation directory tree to get an overview of where the [conf] directory sits and which configuration files it contains. This document only describes the DolphinScheduler configurations; other modules are not covered.
+
+[Note: DolphinScheduler is hereinafter referred to as 'DS'.]
+```
+
+├─bin                               DS application commands directory
+│  ├─dolphinscheduler-daemon.sh         startup/shutdown DS application 
+│  ├─start-all.sh                       startup all DS services with configurations
+│  ├─stop-all.sh                        shutdown all DS services with configurations
+├─conf                              configurations directory
+│  ├─application-api.properties         API-service config properties
+│  ├─datasource.properties              datasource config properties
+│  ├─registry.properties               registry config properties
+│  ├─master.properties                  master config properties
+│  ├─worker.properties                  worker config properties
+│  ├─quartz.properties                  quartz config properties
+│  ├─common.properties                  common-service[storage] config properties
+│  ├─alert.properties                   alert-service config properties
+│  ├─config                             environment variables config directory
+│      ├─install_config.conf                DS environment variables configuration script[install/start DS]
+│  ├─env                                load environment variables configs script directory
+│      ├─dolphinscheduler_env.sh            load environment variables configs [eg: JAVA_HOME,HADOOP_HOME, HIVE_HOME ...]
+│  ├─org                                mybatis mapper files directory
+│  ├─i18n                               i18n configs directory
+│  ├─logback-api.xml                    API-service log config
+│  ├─logback-master.xml                 master-service log config
+│  ├─logback-worker.xml                 worker-service log config
+│  ├─logback-alert.xml                  alert-service log config
+├─sql                                   DS metadata to create/upgrade .sql directory
+│  ├─create                             create SQL scripts directory
+│  ├─upgrade                            upgrade SQL scripts directory
+│  ├─dolphinscheduler_postgre.sql       postgre database init script
+│  ├─dolphinscheduler_mysql.sql         mysql database init script
+│  ├─soft_version                       current DS version-id file
+├─script                            DS services deployment, database create/upgrade scripts directory
+│  ├─create-dolphinscheduler.sh         DS database init script
+│  ├─upgrade-dolphinscheduler.sh        DS database upgrade script
+│  ├─monitor-server.sh                  DS monitor-server start script       
+│  ├─scp-hosts.sh                       transfer installation files script                                     
+│  ├─remove-zk-node.sh                  cleanup zookeeper caches script       
+├─ui                                front-end web resources directory
+├─lib                               DS .jar dependencies directory
+├─install.sh                        auto-setup DS services script
+
+
+```
+
+
+# Configurations in Details
+
+serial number| service classification| config file|
+|--|--|--|
+1|startup/shutdown DS application|dolphinscheduler-daemon.sh
+2|datasource config properties| datasource.properties
+3|registry config properties|registry.properties
+4|common-service[storage] config properties|common.properties
+5|API-service config properties|application-api.properties
+6|master config properties|master.properties
+7|worker config properties|worker.properties
+8|alert-service config properties|alert.properties
+9|quartz config properties|quartz.properties
+10|DS environment variables configuration script[install/start DS]|install_config.conf
+11|load environment variables configs <br /> [eg: JAVA_HOME,HADOOP_HOME, HIVE_HOME ...]|dolphinscheduler_env.sh
+12|services log config files|API-service log config : logback-api.xml  <br /> master-service log config  : logback-master.xml    <br /> worker-service log config : logback-worker.xml  <br /> alert-service log config : logback-alert.xml 
+
+
+## 1.dolphinscheduler-daemon.sh [startup/shutdown DS application]
+dolphinscheduler-daemon.sh is responsible for DS startup & shutdown. 
+Essentially, start-all.sh/stop-all.sh startup/shutdown the cluster via dolphinscheduler-daemon.sh.
+Currently, DS just provides a basic JVM configuration; please configure further JVM options based on your actual resources.
+
+Default simplified parameters are:
+```bash
+export DOLPHINSCHEDULER_OPTS="
+-server 
+-Xmx16g 
+-Xms1g 
+-Xss512k 
+-XX:+UseConcMarkSweepGC 
+-XX:+CMSParallelRemarkEnabled 
+-XX:+UseFastAccessorMethods 
+-XX:+UseCMSInitiatingOccupancyOnly 
+-XX:CMSInitiatingOccupancyFraction=70
+"
+```
+
+> "-XX:DisableExplicitGC" is not recommended, as it may lead to memory leaks (DS depends on Netty for communication).
+
+## 2.datasource.properties [datasource config properties]
+DS uses Druid to manage database connections; the default simplified configs are as follows:
+
+|Parameters | Default value| Description|
+|--|--|--|
+spring.datasource.driver-class-name||datasource driver
+spring.datasource.url||datasource connection url
+spring.datasource.username||datasource username
+spring.datasource.password||datasource password
+spring.datasource.initialSize|5| initial connection pool size
+spring.datasource.minIdle|5| minimum connection pool size number
+spring.datasource.maxActive|5| maximum connection pool size number
+spring.datasource.maxWait|60000| max wait time in milliseconds
+spring.datasource.timeBetweenEvictionRunsMillis|60000| idle connection check interval
+spring.datasource.timeBetweenConnectErrorMillis|60000| retry interval
+spring.datasource.minEvictableIdleTimeMillis|300000| connections idle for longer than minEvictableIdleTimeMillis will be collected during the idle check
+spring.datasource.validationQuery|SELECT 1| validate connection by running the SQL
+spring.datasource.validationQueryTimeout|3| validate connection timeout[seconds]
+spring.datasource.testWhileIdle|true| set whether the pool validates the allocated connection when a new connection request comes
+spring.datasource.testOnBorrow|true| validity check when the program requests a new connection
+spring.datasource.testOnReturn|false| validity check when the program recalls a connection
+spring.datasource.defaultAutoCommit|true| whether auto commit
+spring.datasource.keepAlive|true| runs validationQuery SQL to avoid the connection closed by pool when the connection idles over minEvictableIdleTimeMillis
+spring.datasource.poolPreparedStatements|true| Open PSCache
+spring.datasource.maxPoolPreparedStatementPerConnectionSize|20| specify the size of PSCache on each connection
+
+
+## 3.registry.properties [registry config properties, default is zookeeper]
+|Parameters | Default value| Description|
+|--|--|--|
+registry.plugin.name|zookeeper| plugin name
+registry.servers|localhost:2181| zookeeper cluster connection info
+registry.namespace|dolphinscheduler| DS is stored under zookeeper root directory(Start without /)
+registry.base.sleep.time.ms|60| time to wait between subsequent retries
+registry.max.sleep.ms|300| maximum time to wait between subsequent retries
+registry.max.retries|5| maximum retry times
+registry.session.timeout.ms|30000| session timeout
+registry.connection.timeout.ms|7500| connection timeout
+
+
+## 4.common.properties [hadoop, s3, yarn config properties]
+Currently, common.properties mainly configures hadoop/s3a related settings.
+
+|Parameters | Default value| Description|
+|--|--|--|
+data.basedir.path|/tmp/dolphinscheduler| local directory used to store temp files
+resource.storage.type|NONE| type of resource files: HDFS, S3, NONE
+resource.upload.path|/dolphinscheduler| storage path of resource files
+hadoop.security.authentication.startup.state|false| whether hadoop grant kerberos permission
+java.security.krb5.conf.path|/opt/krb5.conf|kerberos config directory
+login.user.keytab.username|hdfs-mycluster@ESZ.COM|kerberos username
+login.user.keytab.path|/opt/hdfs.headless.keytab|kerberos user keytab
+kerberos.expire.time|2|kerberos expire time, integer, the unit is hour
+resource.view.suffixs| txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties| file types supported by resource center
+hdfs.root.user|hdfs| configure users with corresponding permissions if storage type is HDFS
+fs.defaultFS|hdfs://mycluster:8020|If resource.storage.type=S3, then the request url would be similar to 's3a://dolphinscheduler'. Otherwise if resource.storage.type=HDFS and hadoop supports HA, please copy core-site.xml and hdfs-site.xml into 'conf' directory
+fs.s3a.endpoint||s3 endpoint url
+fs.s3a.access.key||s3 access key
+fs.s3a.secret.key||s3 secret key
+yarn.resourcemanager.ha.rm.ids||specify the yarn resourcemanager url. if resourcemanager supports HA, input HA IP addresses (separated by comma), or input null for standalone
+yarn.application.status.address|http://ds1:8088/ws/v1/cluster/apps/%s|keep default if resourcemanager supports HA or not use resourcemanager. Or replace ds1 with corresponding hostname if resourcemanager in standalone mode
+dolphinscheduler.env.path|env/dolphinscheduler_env.sh|load environment variables configs [eg: JAVA_HOME,HADOOP_HOME, HIVE_HOME ...]
+development.state|false| specify whether in development state
+
+
+## 5.application-api.properties [API-service config properties]
+|Parameters | Default value| Description|
+|--|--|--|
+server.port|12345|api service communication port
+server.servlet.session.timeout|7200|session timeout
+server.servlet.context-path|/dolphinscheduler | request path
+spring.servlet.multipart.max-file-size|1024MB| maximum file size
+spring.servlet.multipart.max-request-size|1024MB| maximum request size
+server.jetty.max-http-post-size|5000000| jetty maximum post size
+spring.messages.encoding|UTF-8| message encoding
+spring.jackson.time-zone|GMT+8| time zone
+spring.messages.basename|i18n/messages| i18n config
+security.authentication.type|PASSWORD| authentication type
+
+
+## 6.master.properties [master-service config properties]
+|Parameters | Default value| Description|
+|--|--|--|
+master.listen.port|5678|master listen port
+master.exec.threads|100|master execute thread number to limit process instances in parallel
+master.exec.task.num|20|master execute task number in parallel per process instance
+master.dispatch.task.num|3|master dispatch task number per batch
+master.host.selector|LowerWeight|master host selector to select a suitable worker, default value: LowerWeight. Optional values include Random, RoundRobin, LowerWeight
+master.heartbeat.interval|10|master heartbeat interval, the unit is second
+master.task.commit.retryTimes|5|master commit task retry times
+master.task.commit.interval|1000|master commit task interval, the unit is millisecond
+master.max.cpuload.avg|-1|master max CPU load avg; the master can only schedule when the current system CPU load average is lower than this value. Default value -1: the number of CPU cores * 2
+master.reserved.memory|0.3|master reserved memory; the master can only schedule when the system available memory is higher than this value. Default value 0.3, the unit is G
+
+
+## 7.worker.properties [worker-service config properties]
+|Parameters | Default value| Description|
+|--|--|--|
+worker.listen.port|1234|worker listen port
+worker.exec.threads|100|worker execute thread number to limit task instances in parallel
+worker.heartbeat.interval|10|worker heartbeat interval, the unit is second
+worker.max.cpuload.avg|-1|worker max CPU load avg; tasks can only be dispatched to the worker when the current system CPU load average is lower than this value. Default value -1: the number of CPU cores * 2
+worker.reserved.memory|0.3|worker reserved memory; tasks can only be dispatched to the worker when the system available memory is higher than this value. Default value 0.3, the unit is G
+worker.groups|default|worker groups separated by comma, like 'worker.groups=default,test' <br> the worker joins the corresponding groups according to this config at startup
+
+
+## 8.alert.properties [alert-service config properties]
+|Parameters | Default value| Description|
+|--|--|--|
+alert.type|EMAIL|alert type|
+mail.protocol|SMTP|mail server protocol
+mail.server.host|xxx.xxx.com|mail server host
+mail.server.port|25|mail server port
+mail.sender|xxx@xxx.com|mail sender email
+mail.user|xxx@xxx.com|mail sender email name
+mail.passwd|111111|mail sender email password
+mail.smtp.starttls.enable|true|specify whether to enable TLS for mail
+mail.smtp.ssl.enable|false|specify whether to enable SSL for mail
+mail.smtp.ssl.trust|xxx.xxx.com|specify the mail SSL trust list
+xls.file.path|/tmp/xls|mail attachment temp storage directory
+||the following settings configure WeCom [optional]|
+enterprise.wechat.enable|false|specify whether enable WeCom
+enterprise.wechat.corp.id|xxxxxxx|WeCom corp id
+enterprise.wechat.secret|xxxxxxx|WeCom secret
+enterprise.wechat.agent.id|xxxxxxx|WeCom agent id
+enterprise.wechat.users|xxxxxxx|WeCom users
+enterprise.wechat.token.url|https://qyapi.weixin.qq.com/cgi-bin/gettoken?  <br /> corpid=$corpId&corpsecret=$secret|WeCom token url
+enterprise.wechat.push.url|https://qyapi.weixin.qq.com/cgi-bin/message/send?  <br /> access_token=$token|WeCom push url
+enterprise.wechat.user.send.msg||send message format
+enterprise.wechat.team.send.msg||group message format
+plugin.dir|/Users/xx/your/path/to/plugin/dir|plugin directory
+
+
+## 9.quartz.properties [quartz config properties]
+This part describes quartz configs; please configure them based on your practical situation and resources.
+
+|Parameters | Default value| Description|
+|--|--|--|
+org.quartz.jobStore.driverDelegateClass | org.quartz.impl.jdbcjobstore.StdJDBCDelegate
+org.quartz.jobStore.driverDelegateClass | org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
+org.quartz.scheduler.instanceName | DolphinScheduler
+org.quartz.scheduler.instanceId | AUTO
+org.quartz.scheduler.makeSchedulerThreadDaemon | true
+org.quartz.jobStore.useProperties | false
+org.quartz.threadPool.class | org.quartz.simpl.SimpleThreadPool
+org.quartz.threadPool.makeThreadsDaemons | true
+org.quartz.threadPool.threadCount | 25
+org.quartz.threadPool.threadPriority | 5
+org.quartz.jobStore.class | org.quartz.impl.jdbcjobstore.JobStoreTX
+org.quartz.jobStore.tablePrefix | QRTZ_
+org.quartz.jobStore.isClustered | true
+org.quartz.jobStore.misfireThreshold | 60000
+org.quartz.jobStore.clusterCheckinInterval | 5000
+org.quartz.jobStore.acquireTriggersWithinLock|true
+org.quartz.jobStore.dataSource | myDs
+org.quartz.dataSource.myDs.connectionProvider.class | org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider
+
+
+## 10.install_config.conf [DS environment variables configuration script[install/start DS]]
+install_config.conf is a bit complicated and is mainly used in the following two places.
+* 1.DS cluster auto installation
+
+> The system will load the configs in install_config.conf and auto-configure the files below, based on the file content, when executing 'install.sh'.
+> Files such as dolphinscheduler-daemon.sh, datasource.properties, registry.properties, common.properties, application-api.properties, master.properties, worker.properties, alert.properties, quartz.properties, etc.
+
+
+* 2.Startup/shutdown DS cluster
+> The system will load the masters, workers, alertServer, apiServers and other parameters inside the file to start up/shut down the DS cluster.
+
+File content as follows:
+```bash
+
+# Note:  please escape the character if the file contains special characters such as `.*[]^${}\+?|()@#&`.
+#   eg: `[` escape to `\[`
+
+# Database type (DS currently only supports PostgreSQL and MySQL)
+dbtype="mysql"
+
+# Database url & port
+dbhost="192.168.xx.xx:3306"
+
+# Database name
+dbname="dolphinscheduler"
+
+
+# Database username
+username="xx"
+
+# Database password
+password="xx"
+
+# Zookeeper url
+zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
+
+# DS installation path, such as '/data1_1T/dolphinscheduler'
+installPath="/data1_1T/dolphinscheduler"
+
+# Deployment user
+# Note: Deployment user needs 'sudo' privilege and has rights to operate HDFS
+#     Root directory must be created by the same user if using HDFS, otherwise permission related issues will be raised.
+deployUser="dolphinscheduler"
+
+
+# Followings are alert-service configs
+# Mail server host
+mailServerHost="smtp.exmail.qq.com"
+
+# Mail server port
+mailServerPort="25"
+
+# Mail sender
+mailSender="xxxxxxxxxx"
+
+# Mail user
+mailUser="xxxxxxxxxx"
+
+# Mail password
+mailPassword="xxxxxxxxxx"
+
+# Whether mail supports TLS: set "true" if yes, otherwise "false"
+starttlsEnable="true"
+
+# Whether mail supports SSL: set "true" if yes, otherwise "false". Note: starttlsEnable and sslEnable cannot both be "true"
+sslEnable="false"
+
+# Mail server host, same as mailServerHost
+sslTrust="smtp.exmail.qq.com"
+
+# Specify which storage type to use for uploading resource files such as SQL files. Supported options are HDFS, S3 and NONE; NONE means this function is not used.
+resourceStorageType="NONE"
+
+# If S3, write the S3 address, for example: s3a://dolphinscheduler
+# Note: for S3, make sure to create the root directory /dolphinscheduler
+defaultFS="hdfs://mycluster:8020"
+
+# If parameter 'resourceStorageType' is S3, following configs are needed:
+s3Endpoint="http://192.168.xx.xx:9010"
+s3AccessKey="xxxxxxxxxx"
+s3SecretKey="xxxxxxxxxx"
+
+# If ResourceManager supports HA, input the master and standby node IPs or hostnames, e.g. '192.168.xx.xx,192.168.xx.xx'; if ResourceManager runs in standalone mode or yarn is not used, set yarnHaIps=""
+yarnHaIps="192.168.xx.xx,192.168.xx.xx"
+
+
+# If ResourceManager runs in standalone mode, set the ResourceManager node ip or hostname; otherwise keep the default.
+singleYarnIp="yarnIp1"
+
+# Storage path when using HDFS/S3
+resourceUploadPath="/dolphinscheduler"
+
+
+# HDFS/S3 root user
+hdfsRootUser="hdfs"
+
+# Followings are Kerberos configs
+
+# Specify whether Kerberos is enabled
+kerberosStartUp="false"
+
+# Kdc krb5 config file path
+krb5ConfPath="$installPath/conf/krb5.conf"
+
+# Keytab username
+keytabUserName="hdfs-mycluster@ESZ.COM"
+
+# Username keytab path
+keytabPath="$installPath/conf/hdfs.headless.keytab"
+
+
+# API-service port
+apiServerPort="12345"
+
+
+# All hosts deploy DS
+ips="ds1,ds2,ds3,ds4,ds5"
+
+# Ssh port, default 22
+sshPort="22"
+
+# Master service hosts
+masters="ds1,ds2"
+
+# All hosts deploy worker service
+# Note: Each worker needs to set a worker group name and default name is "default"
+workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default"
+
+#  Host deploy alert-service
+alertServer="ds3"
+
+# Host deploy API-service
+apiServers="ds1"
+```
+
+## 11.dolphinscheduler_env.sh [load environment variables configs]
+When using shell to commit tasks, DS will load the environment variables defined in dolphinscheduler_env.sh on the host.
+The task types involved are: Shell task, Python task, Spark task, Flink task, DataX task, etc.
+```bash
+export HADOOP_HOME=/opt/soft/hadoop
+export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
+export SPARK_HOME1=/opt/soft/spark1
+export SPARK_HOME2=/opt/soft/spark2
+export PYTHON_HOME=/opt/soft/python
+export JAVA_HOME=/opt/soft/java
+export HIVE_HOME=/opt/soft/hive
+export FLINK_HOME=/opt/soft/flink
+export DATAX_HOME=/opt/soft/datax/bin/datax.py
+
+export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH
+
+```
+
+## 12. Services logback configs
+Services name| logback config name |
+--|--|
+API-service logback config |logback-api.xml|
+master-service logback config|logback-master.xml |
+worker-service logback config|logback-worker.xml |
+alert-service logback config|logback-alert.xml |
diff --git a/docs/2.0.9/docs/en/architecture/design.md b/docs/2.0.9/docs/en/architecture/design.md
new file mode 100644
index 0000000..de3a41a
--- /dev/null
+++ b/docs/2.0.9/docs/en/architecture/design.md
@@ -0,0 +1,339 @@
+## System Architecture Design
+
+Before explaining the architecture of the scheduling system, let's first understand the terms commonly used in a
+scheduling system.
+
+### 1.System Structure
+
+#### 1.1 System architecture diagram
+
+<p align="center">
+  <img src="/img/architecture-1.3.0.jpg" alt="System architecture diagram"  width="70%" />
+  <p align="center">
+        <em>System architecture diagram</em>
+  </p>
+</p>
+
+#### 1.2 Start process activity diagram
+
+<p align="center">
+  <img src="/img/master-process-2.0-en.png" alt="Start process activity diagram"  width="70%" />
+  <p align="center">
+        <em>Start process activity diagram</em>
+  </p>
+</p>
+
+#### 1.3 Architecture description
+
+* **MasterServer**
+
+  MasterServer adopts a distributed, decentralized design concept. MasterServer is mainly responsible for DAG task
+  segmentation, task submission monitoring, and monitoring the health status of other MasterServers and WorkerServers at
+  the same time. When the MasterServer service starts, it registers a temporary node with Zookeeper and performs fault
+  tolerance by monitoring changes to the Zookeeper temporary nodes. MasterServer provides monitoring services based on
+  netty.
+
+  ##### The service mainly includes:
+    - **MasterSchedulerService** is a scanning thread that scans the **command** table in the database regularly,
+      generates workflow instances, and performs different business operations according to different **command types**
+
+    - **WorkflowExecuteThread** is mainly responsible for DAG task segmentation, task submission, logical processing of
+      various command types, processing task status and workflow status events
+
+    - **EventExecuteService** handles all state change events of the workflow instance that the master is responsible
+      for, and uses the thread pool to process the state events of the workflow
+
+    - **StateWheelExecuteThread** handles timing state updates of dependent tasks and timeout tasks
+
+* **WorkerServer**
+
+  WorkerServer also adopts a distributed, decentralized design concept. It supports custom task plug-ins and is mainly
+  responsible for task execution and log services. When the WorkerServer service starts, it registers a temporary node
+  with Zookeeper and maintains a heartbeat.
+
+  ##### The service mainly includes:
+
+    - **WorkerManagerThread** mainly receives tasks sent by the master through netty, and calls the corresponding
+      **TaskExecuteThread** executor according to the task type.
+
+    - **RetryReportTaskStatusThread** mainly reports the task status to the master through netty; if a report fails, it
+      keeps retrying.
+
+    - **LoggerServer** is a log service that provides log fragment viewing, refreshing and downloading functions.
+
+* **Registry**
+
+  The registry is implemented as a plug-in, and Zookeeper is supported by default. The MasterServer and WorkerServer
+  nodes in the system use the registry for cluster management and fault tolerance. In addition, the system performs
+  event monitoring and distributed locking based on the registry (see the sketch after this component list).
+
+* **Alert**
+
+  Provides alert-related functions and only supports standalone deployment. Custom alert plug-ins are supported.
+
+* **API**
+
+  The API interface layer is mainly responsible for processing requests from the front-end UI layer. The service
+  uniformly provides RESTful APIs to the outside world. Interfaces include workflow creation, definition, query,
+  modification, release, taking offline, manual start, stop, pause, resume, starting execution from a specified node,
+  and so on.
+
+* **UI**
+
+  The front-end pages of the system provide its various visual operation interfaces. See the
+  [Introduction to Functions](../guide/homepage.md) section for more details.
+
+#### 1.4 Architecture design ideas
+
+##### 1. Decentralization vs. centralization
+
+###### Centralized design
+
+The centralized design concept is relatively simple: the nodes of the distributed cluster are divided by role into
+roughly two kinds:
+
+<p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_slave.png" alt="master-slave character"  width="50%" />
+ </p>
+
+- The Master is mainly responsible for distributing tasks and monitoring the health of the Slaves, and it can
+  dynamically balance tasks across the Slaves so that no Slave node is "busy to death" or "idle to death".
+- The Worker is mainly responsible for executing tasks and maintaining a heartbeat with the Master, so that the Master
+  can assign tasks to it.
+
+Problems with the centralized design:
+
+- Once there is a problem with the Master, the cluster is left without a leader and collapses entirely. To solve this,
+  most Master/Slave architectures adopt an active/standby Master design, which can be hot or cold standby, with
+  automatic or manual switching, and more and more new systems are able to automatically elect and switch the Master
+  to improve availability.
+- Another problem is that if the Scheduler runs on the Master, then although different tasks of one DAG can run on
+  different machines, the Master becomes overloaded. If the Scheduler runs on the Slave, all tasks of a DAG can only
+  be submitted from a single machine, which puts more pressure on that Slave when there are many parallel tasks.
+
+###### Decentralized
+
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="Decentralization"  width="50%" />
+ </p>
+
+- In a decentralized design there is usually no concept of Master or Slave: all roles are the same and have equal
+  status. The global Internet is a typical decentralized distributed system, where the failure of any connected node
+  only affects a small range of functionality.
+- The core of a decentralized design is that there is no "manager" distinct from the other nodes, so there is no
+  single point of failure. However, because there is no "manager", every node needs to communicate with the other
+  nodes to obtain the necessary machine information, and the unreliability of distributed communication greatly
+  increases the difficulty of implementing the functions above.
+- In fact, truly decentralized distributed systems are rare. Instead, dynamically centralized distributed systems keep
+  emerging. In such architectures the cluster's managers are elected dynamically rather than preset, and when the
+  cluster fails, the nodes automatically hold a "meeting" to elect a new "manager" to preside over the work. The most
+  typical examples are ZooKeeper and Etcd, which is implemented in the Go language.
+
+
+- The decentralization of DolphinScheduler means that both the Masters and the Workers register in ZooKeeper, so that
+  neither the Master cluster nor the Worker cluster has a center. A sharding mechanism is used to distribute workflows
+  fairly across the Masters for execution, and tasks are sent to the Workers for execution through different dispatch
+  strategies.
+
+##### 2. Master execution process
+
+1. DolphinScheduler uses a sharding algorithm: the command id is taken modulo the number of Masters and matched
+   against each Master's sort id, so every command is handled by exactly one Master. The Master converts the command
+   it receives into a workflow instance and uses a thread pool to process the workflow instance (a minimal sketch of
+   the sharding rule follows the workflow steps below).
+
+2. The workflow execution process in DolphinScheduler:
+
+- Start the workflow through the UI or an API call, and persist a command to the database
+- The Master scans the command table using the sharding algorithm, generates a workflow instance (ProcessInstance),
+  and deletes the command data at the same time
+- The Master uses the thread pool to run WorkflowExecuteThread, which executes the workflow instance: building the
+  DAG, creating task instances (TaskInstance), and sending them to the Workers through Netty
+- After a Worker receives a task, it updates the task status and returns the execution information to the Master
+- The Master receives the task information, persists it to the database, and stores the state change event in the
+  EventExecuteService event queue
+- EventExecuteService calls WorkflowExecuteThread according to the event queue to submit subsequent tasks and update
+  the workflow status
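+
+As a minimal sketch (with hypothetical names, not the project's actual classes), the sharding rule described in step 1
+can be expressed as follows:
+
+```java
+// Sketch of command sharding across masters: a command is handled by the master whose
+// slot index equals the command id modulo the number of live masters.
+public final class CommandShardingSketch {
+
+    /** true if the master at masterSlot (of masterCount masters) should handle this command */
+    static boolean isAssignedToThisMaster(int commandId, int masterSlot, int masterCount) {
+        return masterCount > 0 && commandId % masterCount == masterSlot;
+    }
+
+    public static void main(String[] args) {
+        int masterSlot = 1;   // this master's index among the registered masters
+        int masterCount = 3;  // number of masters currently registered
+        for (int commandId = 1; commandId <= 6; commandId++) {
+            System.out.printf("command %d handled here: %b%n",
+                    commandId, isAssignedToThisMaster(commandId, masterSlot, masterCount));
+        }
+    }
+}
+```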
+
+##### 3. Insufficient-thread loop waiting problem
+
+- If a DAG has no sub-processes and the number of commands exceeds the threshold set for the thread pool, the process
+  waits directly or fails.
+- If a large DAG nests many sub-processes, the situation in the figure below produces a "dead" state:
+
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/lack_thread.png" alt="Insufficient threads waiting loop problem"  width="50%" />
+ </p>
+
+In the figure above, MainFlowThread waits for SubFlowThread1 to finish, SubFlowThread1 waits for SubFlowThread2,
+SubFlowThread2 waits for SubFlowThread3, and SubFlowThread3 waits for a new thread from the thread pool, so the whole
+DAG can never finish and none of the threads can be released. Parent and child processes end up waiting on each other
+in a loop. Unless a new Master is started to add threads and break this "stalemate", the scheduling cluster can no
+longer be used.
+
+It seems a bit unsatisfactory to start a new Master to break the deadlock, so we proposed the following three solutions
+to reduce this risk:
+
+1. Calculate the sum of all Master threads, and then calculate the number of threads each DAG needs, i.e. pre-calculate
+   before the DAG process is executed. Because the thread pool is spread over multiple Masters, the total number of
+   threads is unlikely to be obtained in real time.
+2. Judge the single-Master thread pool: if the pool is full, let the thread fail directly.
+3. Add a Command type for insufficient resources: if the thread pool is insufficient, suspend the main process. When
+   new threads become available in the thread pool, the process suspended due to insufficient resources can be woken
+   up and executed again.
+
+Note: the Master Scheduler thread acquires Commands in FIFO order.
+
+So we chose the third way to solve the problem of insufficient threads.
+
+##### 4. Fault-tolerant design
+
+Fault tolerance is divided into service downtime fault tolerance and task retry; service downtime fault tolerance is
+further divided into Master fault tolerance and Worker fault tolerance.
+
+###### 1. Downtime fault tolerance
+
+The service fault-tolerance design relies on ZooKeeper's Watcher mechanism, and the implementation principle is shown in the figure:
+
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant.png" alt="DolphinScheduler fault-tolerant design"  width="40%" />
+ </p>
+
+Here, each Master watches the registry directories of the other Masters and of the Workers. If a remove event is
+received, fault tolerance for the affected process instances or task instances is performed according to the specific
+business logic.
+
+
+
+- Master fault tolerance:
+
+<p align="center">
+   <img src="/img/failover-master.jpg" alt="failover-master"  width="50%" />
+ </p>
+
+Fault tolerance scope: from the perspective of hosts, the fault tolerance scope of a Master includes its own host and
+the hosts of nodes that no longer exist in the registry, and the entire fault tolerance process is performed under a lock;
+
+Fault tolerance content: a Master's fault tolerance covers both process instances and task instances. Before fault
+tolerance, the start time of the instance is compared with the server start time, and fault tolerance is skipped if
+the instance started after the server started;
+
+Post-fault-tolerance processing: after the ZooKeeper-based Master fault tolerance is completed, the instance is
+re-scheduled by the Scheduler thread in DolphinScheduler, which traverses the DAG to find the "running" and
+"submitted successfully" tasks. For "running" tasks it monitors the status of their task instances; for "submitted
+successfully" tasks it checks whether the task already exists in the task queue: if it does, the task instance status
+is monitored as well, and if it does not, the task instance is resubmitted.
+
+- Worker fault tolerance:
+
+<p align="center">
+   <img src="/img/failover-worker.jpg" alt="failover-worker"  width="50%" />
+ </p>
+
+Fault tolerance scope: from the perspective of process instances, each Master is only responsible for fault tolerance
+of its own process instances; a lock is only taken in `handleDeadServer`;
+
+Fault tolerance content: when the remove event of a Worker node is received, the Master only performs fault tolerance
+on task instances. Before fault tolerance, the start time of the instance is compared with the server start time, and
+fault tolerance is skipped if the instance started after the server started;
+
+Post-fault-tolerance processing: once the Master Scheduler thread finds a task instance in the "needs fault tolerance"
+state, it takes over the task and resubmits it.
+
+Note: due to "network jitter", a node may lose its heartbeat with ZooKeeper for a short period of time, which triggers
+a remove event for that node. For this situation we use the simplest approach: once a node's connection with ZooKeeper
+times out, the Master or Worker service is stopped directly.
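+
+A minimal sketch of this "stop on session timeout" policy (assuming an Apache Curator client; this is not the actual
+implementation) could look like the following:
+
+```java
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.state.ConnectionState;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+
+public class StopOnSessionLossSketch {
+    public static void main(String[] args) {
+        CuratorFramework client = CuratorFrameworkFactory.newClient(
+                "localhost:2181", new ExponentialBackoffRetry(1000, 3));
+        client.getConnectionStateListenable().addListener((c, newState) -> {
+            if (newState == ConnectionState.LOST) {
+                // the session expired, so the ephemeral node is gone and peers may already be
+                // taking over this node's work: stop the Master/Worker process directly
+                System.err.println("ZooKeeper session lost, stopping service");
+                System.exit(1);
+            }
+        });
+        client.start();
+    }
+}
+```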
+
+###### 2. Task failure and retry
+
+Here we must first distinguish the concepts of task failure retry, process failure recovery, and process failure rerun:
+
+- Task failure retry is at the task level and is performed automatically by the scheduling system. For example, if a
+  Shell task is configured with 3 retries, it will be re-run up to 3 times after it fails.
+- Process failure recovery is at the process level and is performed manually. Recovery can only be performed **from the
+  failed node** or **from the current node**.
+- Process failure rerun is also at the process level and is performed manually; a rerun starts from the start node.
+
+Back to the topic: we divide the task nodes in a workflow into two types.
+
+- One is a business node, which corresponds to an actual script or processing statement, such as a Shell node, an MR
+  node, a Spark node, or a dependent node.
+
+- The other is a logical node, which does not execute an actual script or statement but only handles the logic of the
+  process flow, such as sub-process nodes.
+
+Each **business node** can be configured with a number of failure retries. When the task node fails, it automatically
+retries until it succeeds or exceeds the configured number of retries. A **logical node** does not support failure
+retries itself, but the tasks inside it do.
+
+If a task in the workflow fails and reaches its maximum number of retries, the workflow fails and stops; the failed
+workflow can then be rerun or recovered manually.
+
+##### 5. Task priority design
+
+In the early scheduling design, without priorities and with fair scheduling, a task submitted first might be completed
+at the same time as a task submitted later, and neither process priority nor task priority could be configured. We
+have therefore redesigned this, and the current design is as follows:
+
+- Tasks are processed from high to low priority according to: **priority of different process instances**, then
+  **priority within the same process instance**, then **priority of tasks within the same process**, and finally the
+  **submission order of tasks within the same process** (see the sketch after this list).
+    - The specific implementation parses the priority from the JSON of the task instance and then saves the
+      **process instance priority_process instance id_task priority_task id** information in the ZooKeeper task queue.
+      When reading from the task queue, a string comparison yields the task that should be executed first.
+
+        - The priority of the process definition is to consider that some processes need to be processed before other
+          processes. This can be configured when the process is started or scheduled to start. There are 5 levels in
+          total, which are HIGHEST, HIGH, MEDIUM, LOW, and LOWEST. As shown below
+            <p align="center">
+               <img src="https://analysys.github.io/easyscheduler_docs_cn/images/process_priority.png" alt="Process priority configuration"  width="40%" />
+             </p>
+
+        - The priority of the task is also divided into 5 levels: HIGHEST, HIGH, MEDIUM, LOW, and LOWEST, as shown
+          below
+            <p align="center">
+               <img src="https://analysys.github.io/easyscheduler_docs_cn/images/task_priority.png" alt="Task priority configuration"  width="35%" />
+             </p>
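+
+A minimal sketch of this queue key (not the actual implementation; the zero-padding of ids is an assumption added here
+so that plain string comparison matches numeric order, given that HIGHEST=0 ... LOWEST=4):
+
+```java
+import java.util.TreeSet;
+
+public class TaskPriorityQueueSketch {
+
+    // key format: processInstancePriority_processInstanceId_taskPriority_taskId
+    static String priorityKey(int processInstancePriority, int processInstanceId,
+                              int taskPriority, int taskId) {
+        return String.format("%02d_%010d_%02d_%010d",
+                processInstancePriority, processInstanceId, taskPriority, taskId);
+    }
+
+    public static void main(String[] args) {
+        TreeSet<String> queue = new TreeSet<>();  // ordered by string comparison
+        queue.add(priorityKey(2, 101, 1, 7));     // MEDIUM process, HIGH task
+        queue.add(priorityKey(0, 102, 3, 8));     // HIGHEST process, LOW task
+        queue.add(priorityKey(2, 101, 0, 9));     // MEDIUM process, HIGHEST task
+        // the task belonging to the HIGHEST-priority process instance comes out first
+        System.out.println("next task key: " + queue.first());
+    }
+}
+```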
+
+##### 6. Logback and Netty implement log access
+
+- Since the Web (UI) and the Worker are not necessarily on the same machine, viewing a log cannot be done as if
+  querying a local file. There are two options:
+  - put the logs on the ES search engine, or
+  - obtain the remote log information through netty communication.
+
+- To keep DolphinScheduler as lightweight as possible, gRPC was chosen to implement remote access to the log
+  information.
+
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/grpc.png" alt="grpc remote access"  width="50%" />
+ </p>
+
+- We use the FileAppender and Filter features of Logback, with custom components, so that each task instance generates
+  its own log file.
+- The FileAppender is mainly implemented as follows:
+
+ ```java
+ /**
+  * task log appender
+  */
+ public class TaskLogAppender extends FileAppender<ILoggingEvent> {
+
+     ...
+
+    @Override
+    protected void append(ILoggingEvent event) {
+
+        if (currentlyActiveFile == null){
+            currentlyActiveFile = getFile();
+        }
+        String activeFile = currentlyActiveFile;
+        // thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId
+        String threadName = event.getThreadName();
+        String[] threadNameArr = threadName.split("-");
+        // logId = processDefineId_processInstanceId_taskInstanceId
+        String logId = threadNameArr[1];
+        ...
+        super.subAppend(event);
+    }
+}
+ ```
+
+Logs are generated in the form of `/process definition id/process instance id/task instance id.log`.
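+
+As a small illustration (assumed base directory, not the project's actual code), the log path can be derived from the
+thread name format shown above:
+
+```java
+public class TaskLogPathSketch {
+
+    // thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId
+    static String logPath(String threadName, String baseDir) {
+        String logId = threadName.split("-")[1];   // processDefineId_processInstanceId_taskInstanceId
+        String[] parts = logId.split("_");
+        return String.format("%s/%s/%s/%s.log", baseDir, parts[0], parts[1], parts[2]);
+    }
+
+    public static void main(String[] args) {
+        // prints logs/1/100/1001.log, i.e. /process definition id/process instance id/task instance id.log
+        System.out.println(logPath("TaskLogInfo-1_100_1001", "logs"));
+    }
+}
+```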
+
+- A Filter is used to match thread names starting with TaskLogInfo:
+
+- TaskLogFilter is implemented as follows:
+
+ ```java
+ /**
+ *  task log filter
+ */
+public class TaskLogFilter extends Filter<ILoggingEvent> {
+
+    @Override
+    public FilterReply decide(ILoggingEvent event) {
+        if (event.getThreadName().startsWith("TaskLogInfo-")){
+            return FilterReply.ACCEPT;
+        }
+        return FilterReply.DENY;
+    }
+}
+ ```
diff --git a/docs/2.0.9/docs/en/architecture/designplus.md b/docs/2.0.9/docs/en/architecture/designplus.md
new file mode 100644
index 0000000..541d572
--- /dev/null
+++ b/docs/2.0.9/docs/en/architecture/designplus.md
@@ -0,0 +1,79 @@
+## System Architecture Design
+
+Before explaining the architecture of the scheduling system, let's first understand the commonly used terms of a
+scheduling system.
+
+### 1. Glossary
+
+**DAG:** The full name is Directed Acyclic Graph, abbreviated to DAG. Tasks in a workflow are assembled in the form of
+a directed acyclic graph, and a topological traversal is performed from the nodes with zero in-degree until there are
+no successor nodes left (a minimal traversal sketch follows the example figure below). An example:
+
+<p align="center">
+  <img src="/img/dag_examples_cn.jpg" alt="dag example"  width="60%" />
+  <p align="center">
+        <em>dag example</em>
+  </p>
+</p>
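+
+A minimal, self-contained sketch of this topological traversal (Kahn's algorithm; the node names are hypothetical):
+
+```java
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class DagTraversalSketch {
+    public static void main(String[] args) {
+        // edges: task -> downstream tasks
+        Map<String, List<String>> dag = new HashMap<>();
+        dag.put("A", List.of("B", "C"));
+        dag.put("B", List.of("D"));
+        dag.put("C", List.of("D"));
+        dag.put("D", List.of());
+
+        // compute in-degrees
+        Map<String, Integer> inDegree = new HashMap<>();
+        dag.keySet().forEach(n -> inDegree.putIfAbsent(n, 0));
+        dag.values().forEach(ds -> ds.forEach(d -> inDegree.merge(d, 1, Integer::sum)));
+
+        // start from the nodes with zero in-degree
+        Deque<String> ready = new ArrayDeque<>();
+        inDegree.forEach((n, d) -> { if (d == 0) ready.add(n); });
+
+        List<String> order = new ArrayList<>();
+        while (!ready.isEmpty()) {
+            String n = ready.poll();
+            order.add(n);                                   // "run" the task
+            for (String next : dag.getOrDefault(n, List.of())) {
+                if (inDegree.merge(next, -1, Integer::sum) == 0) {
+                    ready.add(next);                        // all upstream tasks finished
+                }
+            }
+        }
+        System.out.println(order);                          // e.g. [A, B, C, D]
+    }
+}
+```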
+
+**Process definition**: A visual **DAG** formed by dragging task nodes and establishing associations between them
+
+**Process instance**: A process instance is the instantiation of a process definition and can be generated by a manual
+start or by scheduled triggering. Each run of a process definition generates one process instance
+
+**Task instance**: The task instance is the instantiation of the task node in the process definition, which identifies
+the specific task execution status
+
+**Task type**: Currently supports SHELL, SQL, SUB_PROCESS (sub-process), PROCEDURE, MR, SPARK, PYTHON and DEPENDENT
+(dependency), with dynamic plug-in extension planned. Note: a **SUB_PROCESS** is itself a separate process definition
+that can be started and executed on its own
+
+**Scheduling method**: The system supports scheduled scheduling based on cron expressions as well as manual scheduling.
+The supported command types are: start workflow, start execution from the current node, resume a fault-tolerant
+workflow, resume a paused process, start execution from a failed node, complement (backfill), schedule, rerun, pause,
+stop, and resume a waiting thread. Among them, **resume fault-tolerant workflow** and **resume waiting thread** are
+used internally by the scheduler and cannot be called from the outside
+
+**Scheduled**: The system adopts the **quartz** distributed scheduler and supports the visual generation of cron expressions
+
+**Dependency**: The system not only supports simple **DAG** dependencies between predecessor and successor nodes, but
+also provides **task dependent** nodes, supporting custom task dependencies **between processes**
+
+**Priority**: Supports setting the priority of process instances and task instances; if no priority is set, the
+default is first-in-first-out
+
+**Email alert**: Supports sending **SQL task** query results by email, as well as email alerts for process instance
+run results and fault tolerance alert notifications
+
+**Failure strategy**: For tasks running in parallel, two strategies are provided for handling a failed task.
+**Continue** means that the remaining parallel tasks keep running regardless of the failed task until the process
+ends. **End** means that once a failed task is found, the running parallel tasks are killed and the process fails and
+ends
+
+**Complement**: Backfills historical data, supporting two complement modes: **interval parallel and serial**
+
+### 2. Module introduction
+
+- dolphinscheduler-alert alarm module, providing AlertServer service.
+
+- dolphinscheduler-api web application module, providing ApiServer service.
+
+- dolphinscheduler-common General constant enumeration, utility class, data structure or base class
+
+- dolphinscheduler-dao provides operations such as database access.
+
+- dolphinscheduler-remote client and server based on netty
+
+- dolphinscheduler-server MasterServer and WorkerServer services
+
+- dolphinscheduler-service service module, including Quartz, ZooKeeper and log client access services, which are easy
+  for the server module and the api module to call
+
+- dolphinscheduler-ui front-end module
+
+### Sum up
+
+From the perspective of scheduling, this article has given a preliminary introduction to the architecture principles
+and implementation ideas of DolphinScheduler, a distributed workflow scheduling system for big data. To be continued.
+
+
diff --git a/docs/2.0.9/docs/en/architecture/load-balance.md b/docs/2.0.9/docs/en/architecture/load-balance.md
new file mode 100644
index 0000000..33a8330
--- /dev/null
+++ b/docs/2.0.9/docs/en/architecture/load-balance.md
@@ -0,0 +1,61 @@
+### Load Balance
+
+Load balancing refers to distributing load reasonably across servers through routing algorithms (usually in a cluster
+environment) so as to make the best use of server performance.
+
+
+
+### DolphinScheduler-Worker load balancing algorithms
+
+DolphinScheduler-Master allocates tasks to workers and provides three algorithms by default:
+
+- Weighted random (random)
+- Smooth round-robin (roundrobin)
+- Linear load (lowerweight)
+
+The default configuration is the linear load.
+
+Since the routing is done on the client side, i.e. the master service, you can change `master.host.selector` in
+`master.properties` to configure the algorithm you want.
+
+e.g. `master.host.selector=random` (case-insensitive)
+
+### Worker load balancing configuration
+
+The configuration file is worker.properties
+
+#### weight
+
+All of the above load algorithms are weighted, and the weights affect the outcome of task distribution. You can set
+different weights for different machines by modifying the `worker.weight` value.
+
+####  Preheating
+
+With JIT optimisation in mind, we let the worker run at low power for a period of time after startup so that it can
+gradually reach its optimal state; we call this process preheating. If you are interested, you can read some articles
+about JIT.
+
+So the worker gradually reaches its maximum weight over time after it starts (ten minutes by default; we do not provide
+a configuration item, but you can change it and submit a PR if needed).
+
+### Load balancing algorithm breakdown
+
+#### Random (weighted)
+
+This algorithm is relatively simple: one of the eligible workers is selected at random (the weight affects its
+probability of being selected).
+
+#### Smooth round-robin (weighted)
+
+The weighted round-robin algorithm has an obvious drawback: under certain weight combinations it generates an uneven
+sequence of instances, and this unsmoothed load may cause some instances to experience transient high load, with a
+risk of system downtime. To address this scheduling flaw, we provide a smooth weighted round-robin algorithm.
+
+Each worker has two weights for each route: weight (which remains constant after warm-up is complete) and
+current_weight (which changes dynamically). On every route, current_weight is increased by weight for every worker and
+the weights of all workers are summed up as total_weight; the worker with the largest current_weight is then selected
+for this task, and its current_weight is decreased by total_weight.
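+
+A minimal, generic sketch of this selection rule (worker names and weights are made up for the example):
+
+```java
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+public class SmoothRoundRobinSketch {
+
+    // value = {weight, currentWeight}
+    static String select(Map<String, int[]> workers) {
+        int totalWeight = 0;
+        String best = null;
+        for (Map.Entry<String, int[]> e : workers.entrySet()) {
+            int[] w = e.getValue();
+            w[1] += w[0];                         // current_weight += weight
+            totalWeight += w[0];
+            if (best == null || w[1] > workers.get(best)[1]) {
+                best = e.getKey();
+            }
+        }
+        workers.get(best)[1] -= totalWeight;      // winner: current_weight -= total_weight
+        return best;
+    }
+
+    public static void main(String[] args) {
+        Map<String, int[]> workers = new LinkedHashMap<>();
+        workers.put("worker-1", new int[]{5, 0});
+        workers.put("worker-2", new int[]{1, 0});
+        workers.put("worker-3", new int[]{1, 0});
+        for (int i = 0; i < 7; i++) {
+            // smooth: worker-1 is not picked five times in a row
+            System.out.print(select(workers) + " ");
+        }
+    }
+}
+```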
+
+#### Linear weighting (default algorithm)
+
+With this algorithm, every worker reports its own load information to the registry at regular intervals. The judgement
+is based on two main values:
+
+- load average (the default limit is the number of CPU cores * 2)
+- available physical memory (the default reserve is 0.3, in G)
+
+If either check fails, i.e. the load average is above the limit or the available physical memory is below the reserve,
+the worker will not participate in load balancing (no traffic will be allocated to it).
+
+You can customise the configuration by changing the following properties in worker.properties:
+
+- worker.max.cpuload.avg=-1 (maximum cpu load average; the worker can only be dispatched tasks while the system load
+  average is below this value; the default -1 means the number of cpu cores * 2)
+- worker.reserved.memory=0.3 (reserved memory in G; the worker can only be dispatched tasks while the available system
+  memory is above this value; the default is 0.3)
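+
+A rough sketch of this overload protection (not the worker's real code; it relies on the HotSpot-specific
+com.sun.management.OperatingSystemMXBean, and the defaults mirror the properties above):
+
+```java
+import com.sun.management.OperatingSystemMXBean;
+import java.lang.management.ManagementFactory;
+
+public class WorkerOverloadCheckSketch {
+    public static void main(String[] args) {
+        double maxCpuLoadAvg = Runtime.getRuntime().availableProcessors() * 2; // worker.max.cpuload.avg default
+        double reservedMemoryG = 0.3;                                          // worker.reserved.memory default
+
+        OperatingSystemMXBean os =
+                (OperatingSystemMXBean) ManagementFactory.getOperatingSystemMXBean();
+        double loadAverage = os.getSystemLoadAverage();
+        double availableMemoryG = os.getFreePhysicalMemorySize() / 1024.0 / 1024.0 / 1024.0;
+
+        // tasks are only accepted while the load average stays below the limit
+        // and the available memory stays above the reserved amount
+        boolean acceptTasks = loadAverage < maxCpuLoadAvg && availableMemoryG > reservedMemoryG;
+        System.out.printf("load=%.2f availableMem=%.2fG acceptTasks=%b%n",
+                loadAverage, availableMemoryG, acceptTasks);
+    }
+}
+```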
diff --git a/docs/2.0.9/docs/en/architecture/metadata.md b/docs/2.0.9/docs/en/architecture/metadata.md
new file mode 100644
index 0000000..9b66e1e
--- /dev/null
+++ b/docs/2.0.9/docs/en/architecture/metadata.md
@@ -0,0 +1,173 @@
+# Dolphin Scheduler 2.0.3 MetaData
+
+<a name="V5KOl"></a>
+### Dolphin Scheduler 2.0 DB Table Overview
+| Table Name | Comment |
+| :---: | :---: |
+| t_ds_access_token | token for access ds backend |
+| t_ds_alert | alert detail |
+| t_ds_alertgroup | alert group |
+| t_ds_command | command detail |
+| t_ds_datasource | data source |
+| t_ds_error_command | error command detail |
+| t_ds_process_definition | process definition |
+| t_ds_process_instance | process instance |
+| t_ds_project | project |
+| t_ds_queue | queue |
+| t_ds_relation_datasource_user | datasource related to user |
+| t_ds_relation_process_instance | sub process |
+| t_ds_relation_project_user | project related to user |
+| t_ds_relation_resources_user | resource related to user |
+| t_ds_relation_udfs_user | UDF related to user |
+| t_ds_relation_user_alertgroup | alert group related to user |
+| t_ds_resources | resource center file |
+| t_ds_schedules | process definition schedule |
+| t_ds_session | user login session |
+| t_ds_task_instance | task instance |
+| t_ds_tenant | tenant |
+| t_ds_udfs | UDF resource |
+| t_ds_user | user detail |
+| t_ds_version | ds version |
+
+
+---
+
+<a name="XCLy1"></a>
+### E-R Diagram
+<a name="5hWWZ"></a>
+#### User Queue DataSource
+![image.png](/img/metadata-erd/user-queue-datasource.png)
+
+- Multiple users can belong to one tenant
+- The queue field in the t_ds_user table stores the queue_name from the t_ds_queue table, while t_ds_tenant stores queue information using queue_id. During the execution of a process definition, the user's queue has the highest priority; if the user's queue is empty, the tenant's queue is used (a tiny sketch of this rule follows the list).
+- The user_id field in the t_ds_datasource table indicates the user who created the data source. The user_id in t_ds_relation_datasource_user indicates the user who has permission to the data source.
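+
+A tiny sketch of this queue selection rule (hypothetical method, not the project's code):
+
+```java
+public class QueueSelectionSketch {
+
+    /** the user's queue wins when it is set; otherwise the tenant's queue is used */
+    static String resolveQueue(String userQueue, String tenantQueue) {
+        return (userQueue == null || userQueue.isEmpty()) ? tenantQueue : userQueue;
+    }
+
+    public static void main(String[] args) {
+        System.out.println(resolveQueue("", "tenant_queue"));           // -> tenant_queue
+        System.out.println(resolveQueue("user_queue", "tenant_queue")); // -> user_queue
+    }
+}
+```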
+<a name="7euSN"></a>
+#### Project Resource Alert
+![image.png](/img/metadata-erd/project-resource-alert.png)
+
+- A user can have multiple projects; project authorization binds the relationship using project_id and user_id in the t_ds_relation_project_user table
+- The user_id in the t_ds_project table represents the user who created the project, and the user_id in the t_ds_relation_project_user table represents users who have permission to the project
+- The user_id in the t_ds_resources table represents the user who created the resource, and the user_id in t_ds_relation_resources_user represents the user who has permissions to the resource
+- The user_id in the t_ds_udfs table represents the user who created the UDF, and the user_id in the t_ds_relation_udfs_user table represents a user who has permission to the UDF
+<a name="JEw4v"></a>
+#### Command Process Task
+![image.png](/img/metadata-erd/command.png)<br />![image.png](/img/metadata-erd/process-task.png)
+
+- A project has multiple process definitions, a process definition can generate multiple process instances, and a process instance can generate multiple task instances
+- The t_ds_schedules table stores the timing schedule information for process definitions
+- The data in the t_ds_relation_process_instance table is used to handle process definitions that contain sub-processes: the parent_process_instance_id field is the id of the main process instance that contains the child process, the process_instance_id field is the id of the sub-process instance, and the parent_task_instance_id field is the task instance id of the sub-process node
+- The process instance table and the task instance table correspond to the t_ds_process_instance table and the t_ds_task_instance table, respectively.
+
+---
+
+<a name="yd79T"></a>
+### Core Table Schema
+<a name="6bVhH"></a>
+#### t_ds_process_definition
+| Field | Type | Comment |
+| --- | --- | --- |
+| id | int | primary key |
+| name | varchar | process definition name |
+| version | int | process definition version |
+| release_state | tinyint | process definition release state:0:offline,1:online |
+| project_id | int | project id |
+| user_id | int | process definition creator id |
+| process_definition_json | longtext | process definition json content |
+| description | text | process definition description |
+| global_params | text | global parameters |
+| flag | tinyint | process is available: 0 not available, 1 available |
+| locations | text | Node location information |
+| connects | text | Node connection information |
+| receivers | text | receivers |
+| receivers_cc | text | carbon copy list |
+| create_time | datetime | create time |
+| timeout | int | timeout |
+| tenant_id | int | tenant id |
+| update_time | datetime | update time |
+
+<a name="t5uxM"></a>
+#### t_ds_process_instance
+| Field | Type | Comment |
+| --- | --- | --- |
+| id | int | primary key |
+| name | varchar | process instance name |
+| process_definition_id | int | process definition id |
+| state | tinyint | process instance Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete |
+| recovery | tinyint | process instance failover flag:0:normal,1:failover instance |
+| start_time | datetime | process instance start time |
+| end_time | datetime | process instance end time |
+| run_times | int | process instance run times |
+| host | varchar | process instance host |
+| command_type | tinyint | command type:0 start ,1 Start from the current node,2 Resume a fault-tolerant process,3 Resume Pause Process, 4 Execute from the failed node,5 Complement, 6 dispatch, 7 re-run, 8 pause, 9 stop ,10 Resume waiting thread |
+| command_param | text | json command parameters |
+| task_depend_type | tinyint | task depend type. 0: only current node,1:before the node,2:later nodes |
+| max_try_times | tinyint | max try times |
+| failure_strategy | tinyint | failure strategy. 0:end the process when node failed,1:continue running the other nodes when node failed |
+| warning_type | tinyint | warning type. 0:no warning,1:warning if process success,2:warning if process failed,3:warning if success |
+| warning_group_id | int | warning group id |
+| schedule_time | datetime | schedule time |
+| command_start_time | datetime | command start time |
+| global_params | text | global parameters |
+| process_instance_json | longtext | process instance json |
+| flag | tinyint | process instance is available: 0 not available, 1 available |
+| update_time | timestamp | update time |
+| is_sub_process | int | whether the process is sub process: 1 sub-process, 0 not sub-process |
+| executor_id | int | executor id |
+| locations | text | Node location information |
+| connects | text | Node connection information |
+| history_cmd | text | history commands of process instance operation |
+| dependence_schedule_times | text | depend schedule fire time |
+| process_instance_priority | int | process instance priority. 0 Highest,1 High,2 Medium,3 Low,4 Lowest |
+| worker_group_id | int | worker group id |
+| timeout | int | time out |
+| tenant_id | int | tenant id |
+
+<a name="tHZsY"></a>
+#### t_ds_task_instance
+| Field | Type | Comment |
+| --- | --- | --- |
+| id | int | primary key |
+| name | varchar | task name |
+| task_type | varchar | task type |
+| process_definition_id | int | process definition id |
+| process_instance_id | int | process instance id |
+| task_json | longtext | task content json |
+| state | tinyint | Status: 0 commit succeeded, 1 running, 2 prepare to pause, 3 pause, 4 prepare to stop, 5 stop, 6 fail, 7 succeed, 8 need fault tolerance, 9 kill, 10 wait for thread, 11 wait for dependency to complete |
+| submit_time | datetime | task submit time |
+| start_time | datetime | task start time |
+| end_time | datetime | task end time |
+| host | varchar | host of task running on |
+| execute_path | varchar | task execute path in the host |
+| log_path | varchar | task log path |
+| alert_flag | tinyint | whether alert |
+| retry_times | int | task retry times |
+| pid | int | pid of task |
+| app_link | varchar | yarn app id |
+| flag | tinyint | taskinstance is available: 0 not available, 1 available |
+| retry_interval | int | retry interval when task failed  |
+| max_retry_times | int | max retry times |
+| task_instance_priority | int | task instance priority:0 Highest,1 High,2 Medium,3 Low,4 Lowest |
+| worker_group_id | int | worker group id |
+
+<a name="gLGtm"></a>
+#### t_ds_command
+| Field | Type | Comment |
+| --- | --- | --- |
+| id | int | primary key |
+| command_type | tinyint | Command type: 0 start workflow, 1 start execution from current node, 2 resume fault-tolerant workflow, 3 resume pause process, 4 start execution from failed node, 5 complement, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 resume waiting thread |
+| process_definition_id | int | process definition id |
+| command_param | text | json command parameters |
+| task_depend_type | tinyint | Node dependency type: 0 current node, 1 forward, 2 backward |
+| failure_strategy | tinyint | Failed policy: 0 end, 1 continue |
+| warning_type | tinyint | Alarm type: 0 is not sent, 1 process is sent successfully, 2 process is sent failed, 3 process is sent successfully and all failures are sent |
+| warning_group_id | int | warning group |
+| schedule_time | datetime | schedule time |
+| start_time | datetime | start time |
+| executor_id | int | executor id |
+| dependence | varchar | dependence |
+| update_time | datetime | update time |
+| process_instance_priority | int | process instance priority: 0 Highest,1 High,2 Medium,3 Low,4 Lowest |
+| worker_group_id | int | worker group id |
+
+
+
diff --git a/docs/2.0.9/docs/en/architecture/task-structure.md b/docs/2.0.9/docs/en/architecture/task-structure.md
new file mode 100644
index 0000000..a62f58d
--- /dev/null
+++ b/docs/2.0.9/docs/en/architecture/task-structure.md
@@ -0,0 +1,1131 @@
+
+# Overall Tasks Storage Structure
+All tasks created in DolphinScheduler are saved in the t_ds_process_definition table.
+
+The following shows the 't_ds_process_definition' table structure:
+
+
+No. | field  | type  |  description
+-------- | ---------| -------- | ---------
+1|id|int(11)|primary key
+2|name|varchar(255)|process definition name
+3|version|int(11)|process definition version
+4|release_state|tinyint(4)|release status of process definition: 0 not online, 1 online
+5|project_id|int(11)|project id
+6|user_id|int(11)|user id of the process definition
+7|process_definition_json|longtext|process definition JSON
+8|description|text|process definition description
+9|global_params|text|global parameters
+10|flag|tinyint(4)|specify whether the process is available: 0 is not available, 1 is available
+11|locations|text|node location information
+12|connects|text|node connectivity info
+13|receivers|text|receivers
+14|receivers_cc|text|CC receivers
+15|create_time|datetime|create time
+16|timeout|int(11) |timeout
+17|tenant_id|int(11) |tenant id
+18|update_time|datetime|update time
+19|modify_by|varchar(36)|specifics of the user that made the modification
+20|resource_ids|varchar(255)|resource ids
+
+The 'process_definition_json' field is the core field, which defines the task information in the DAG diagram, and it is stored in JSON format.
+
+The following table describes the common data structure.
+No. | field  | type  |  description
+-------- | ---------| -------- | ---------
+1|globalParams|Array|global parameters
+2|tasks|Array|task collections in the process [for the structure of each type, please refer to the following sections]
+3|tenantId|int|tenant ID
+4|timeout|int|timeout
+
+Data example:
+```bash
+{
+    "globalParams":[
+        {
+            "prop":"golbal_bizdate",
+            "direct":"IN",
+            "type":"VARCHAR",
+            "value":"${system.biz.date}"
+        }
+    ],
+    "tasks":Array[1],
+    "tenantId":0,
+    "timeout":0
+}
+```
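+
+As a small illustration (the use of Jackson here is an assumption of this sketch, not a statement about the project's
+own parser), the common structure above can be read like this:
+
+```java
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+public class ProcessDefinitionJsonSketch {
+    public static void main(String[] args) throws Exception {
+        String json = "{\"globalParams\":[{\"prop\":\"golbal_bizdate\",\"direct\":\"IN\","
+                + "\"type\":\"VARCHAR\",\"value\":\"${system.biz.date}\"}],"
+                + "\"tasks\":[],\"tenantId\":0,\"timeout\":0}";
+
+        JsonNode root = new ObjectMapper().readTree(json);
+        System.out.println("tenantId = " + root.get("tenantId").asInt());
+        System.out.println("timeout  = " + root.get("timeout").asInt());
+        for (JsonNode param : root.get("globalParams")) {          // global parameters array
+            System.out.println(param.get("prop").asText() + " = " + param.get("value").asText());
+        }
+        for (JsonNode task : root.get("tasks")) {                  // task collection (empty here)
+            System.out.println("task type = " + task.get("type").asText());
+        }
+    }
+}
+```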
+
+# The Detailed Explanation of The Storage Structure of Each Task Type
+
+## Shell Nodes
+**The node data structure is as follows:**
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String| task Id|
+2|type ||String |task type |SHELL
+3| name| |String|task name |
+4| params| |Object|customized parameters |Json format
+5| |rawScript |String| Shell script |
+6| | localParams| Array|customized local parameters||
+7| | resourceList| Array|resource files||
+8|description | |String|description | |
+9|runFlag | |String |execution flag| |
+10|conditionResult | |Object|condition branch | |
+11| | successNode| Array|jump to node if success| |
+12| | failedNode|Array|jump to node if failure| 
+13| dependence| |Object |task dependency |mutual exclusion with params
+14|maxRetryTimes | |String|max retry times | |
+15|retryInterval | |String |retry interval| |
+16|timeout | |Object|timeout | |
+17| taskInstancePriority| |String|task priority | |
+18|workerGroup | |String |Worker group| |
+19|preTasks | |Array|preposition tasks | |
+
+
+**Node data example:**
+
+```bash
+{
+    "type":"SHELL",
+    "id":"tasks-80760",
+    "name":"Shell Task",
+    "params":{
+        "resourceList":[
+            {
+                "id":3,
+                "name":"run.sh",
+                "res":"run.sh"
+            }
+        ],
+        "localParams":[
+
+        ],
+        "rawScript":"echo "This is a shell script""
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            ""
+        ],
+        "failedNode":[
+            ""
+        ]
+    },
+    "dependence":{
+
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+
+    ]
+}
+
+```
+
+
+## SQL Node
+Perform data query and update operations on the specified datasource through SQL.
+
+**The node data structure is as follows:**
+No.|parameter name||type|description |note
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String|task id|
+2|type ||String |task type |SQL
+3| name| |String|task name|
+4| params| |Object|customized parameters|Json format
+5| |type |String |database type
+6| |datasource |Int |datasource id
+7| |sql |String |query SQL statement
+8| |udfs | String| udf functions|specify UDF function ids, separate by comma
+9| |sqlType | String| SQL node type |0 for query and 1 for none-query SQL
+10| |title |String | mail title
+11| |receivers |String |receivers
+12| |receiversCc |String |CC receivers
+13| |showType | String|display type of mail|optionals: TABLE or ATTACHMENT
+14| |connParams | String|connect parameters
+15| |preStatements | Array|preposition SQL statements
+16| | postStatements| Array|postposition SQL statements||
+17| | localParams| Array|customized parameters||
+18|description | |String|description | |
+19|runFlag | |String |execution flag| |
+20|conditionResult | |Object|condition branch  | |
+21| | successNode| Array|jump to node if success| |
+22| | failedNode|Array|jump to node if failure| 
+23| dependence| |Object |task dependency |mutual exclusion with params
+24|maxRetryTimes | |String|max retry times | |
+25|retryInterval | |String |retry interval| |
+26|timeout | |Object|timeout | |
+27| taskInstancePriority| |String|task priority | |
+28|workerGroup | |String |Worker group| |
+29|preTasks | |Array|preposition tasks | |
+
+
+**Node data example:**
+
+```bash
+{
+    "type":"SQL",
+    "id":"tasks-95648",
+    "name":"SqlTask-Query",
+    "params":{
+        "type":"MYSQL",
+        "datasource":1,
+        "sql":"select id , namge , age from emp where id =  ${id}",
+        "udfs":"",
+        "sqlType":"0",
+        "title":"xxxx@xxx.com",
+        "receivers":"xxxx@xxx.com",
+        "receiversCc":"",
+        "showType":"TABLE",
+        "localParams":[
+            {
+                "prop":"id",
+                "direct":"IN",
+                "type":"INTEGER",
+                "value":"1"
+            }
+        ],
+        "connParams":"",
+        "preStatements":[
+            "insert into emp ( id,name ) value (1,'Li' )"
+        ],
+        "postStatements":[
+
+        ]
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            ""
+        ],
+        "failedNode":[
+            ""
+        ]
+    },
+    "dependence":{
+
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+
+    ]
+}
+```
+
+
+## PROCEDURE [stored procedures] Node
+**The node data structure is as follows:**
+**Node data example:**
+
+## SPARK Node
+**The node data structure is as follows:**
+
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String| task Id|
+2|type ||String |task type |SPARK
+3| name| |String|task name |
+4| params| |Object|customized parameters |Json format
+5| |mainClass |String | main class
+6| |mainArgs | String| execution arguments
+7| |others | String| other arguments
+8| |mainJar |Object | application jar package
+9| |deployMode |String |deployment mode |local,client,cluster
+10| |driverCores | String| driver cores
+11| |driverMemory | String| driver memory
+12| |numExecutors |String | executor count
+13| |executorMemory |String | executor memory
+14| |executorCores |String | executor cores
+15| |programType | String| program type|JAVA,SCALA,PYTHON
+16| | sparkVersion| String|	Spark version| SPARK1 , SPARK2
+17| | localParams| Array|customized local parameters
+18| | resourceList| Array|resource files
+19|description | |String|description | |
+20|runFlag | |String |execution flag| |
+21|conditionResult | |Object|condition branch| |
+22| | successNode| Array|jump to node if success| |
+23| | failedNode|Array|jump to node if failure| 
+24| dependence| |Object |task dependency |mutual exclusion with params
+25|maxRetryTimes | |String|max retry times | |
+26|retryInterval | |String |retry interval| |
+27|timeout | |Object|timeout | |
+28| taskInstancePriority| |String|task priority | |
+29|workerGroup | |String |Worker group| |
+30|preTasks | |Array|preposition tasks| |
+
+
+**Node data example:**
+
+```bash
+{
+    "type":"SPARK",
+    "id":"tasks-87430",
+    "name":"SparkTask",
+    "params":{
+        "mainClass":"org.apache.spark.examples.SparkPi",
+        "mainJar":{
+            "id":4
+        },
+        "deployMode":"cluster",
+        "resourceList":[
+            {
+                "id":3,
+                "name":"run.sh",
+                "res":"run.sh"
+            }
+        ],
+        "localParams":[
+
+        ],
+        "driverCores":1,
+        "driverMemory":"512M",
+        "numExecutors":2,
+        "executorMemory":"2G",
+        "executorCores":2,
+        "mainArgs":"10",
+        "others":"",
+        "programType":"SCALA",
+        "sparkVersion":"SPARK2"
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            ""
+        ],
+        "failedNode":[
+            ""
+        ]
+    },
+    "dependence":{
+
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+
+    ]
+}
+```
+
+
+
+## MapReduce(MR) Node
+**The node data structure is as follows:**
+
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String| task Id|
+2|type ||String |task type |MR
+3| name| |String|task name |
+4| params| |Object|customized parameters |Json format
+5| |mainClass |String | main class
+6| |mainArgs | String|execution arguments
+7| |others | String|other arguments
+8| |mainJar |Object | application jar package
+9| |programType | String|program type|JAVA,PYTHON
+10| | localParams| Array|customized local parameters
+11| | resourceList| Array|resource files
+12|description | |String|description | |
+13|runFlag | |String |execution flag| |
+14|conditionResult | |Object|condition branch| |
+15| | successNode| Array|jump to node if success| |
+16| | failedNode|Array|jump to node if failure| 
+17| dependence| |Object |task dependency |mutual exclusion with params
+18|maxRetryTimes | |String|max retry times | |
+19|retryInterval | |String |retry interval| |
+20|timeout | |Object|timeout | |
+21| taskInstancePriority| |String|task priority| |
+22|workerGroup | |String |Worker group| |
+23|preTasks | |Array|preposition tasks| |
+
+
+
+**Node data example:**
+
+```bash
+{
+    "type":"MR",
+    "id":"tasks-28997",
+    "name":"MRTask",
+    "params":{
+        "mainClass":"wordcount",
+        "mainJar":{
+            "id":5
+        },
+        "resourceList":[
+            {
+                "id":3,
+                "name":"run.sh",
+                "res":"run.sh"
+            }
+        ],
+        "localParams":[
+
+        ],
+        "mainArgs":"/tmp/wordcount/input /tmp/wordcount/output/",
+        "others":"",
+        "programType":"JAVA"
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            ""
+        ],
+        "failedNode":[
+            ""
+        ]
+    },
+    "dependence":{
+
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+
+    ]
+}
+```
+
+
+## Python Node
+**The node data structure is as follows:**
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String|  task Id|
+2|type ||String |task type|PYTHON
+3| name| |String|task name|
+4| params| |Object|customized parameters |Json format
+5| |rawScript |String| Python script|
+6| | localParams| Array|customized local parameters||
+7| | resourceList| Array|resource files||
+8|description | |String|description | |
+9|runFlag | |String |execution flag| |
+10|conditionResult | |Object|condition branch| |
+11| | successNode| Array|jump to node if success| |
+12| | failedNode|Array|jump to node if failure | 
+13| dependence| |Object |task dependency |mutual exclusion with params
+14|maxRetryTimes | |String|max retry times | |
+15|retryInterval | |String |retry interval| |
+16|timeout | |Object|timeout | |
+17| taskInstancePriority| |String|task priority | |
+18|workerGroup | |String |Worker group| |
+19|preTasks | |Array|preposition tasks| |
+
+
+**Node data example:**
+
+```bash
+{
+    "type":"PYTHON",
+    "id":"tasks-5463",
+    "name":"Python Task",
+    "params":{
+        "resourceList":[
+            {
+                "id":3,
+                "name":"run.sh",
+                "res":"run.sh"
+            }
+        ],
+        "localParams":[
+
+        ],
+        "rawScript":"print("This is a python script")"
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            ""
+        ],
+        "failedNode":[
+            ""
+        ]
+    },
+    "dependence":{
+
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+
+    ]
+}
+```
+
+
+
+
+## Flink Node
+**The node data structure is as follows:**
+
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String|task Id|
+2|type ||String |task type|FLINK
+3| name| |String|task name|
+4| params| |Object|customized parameters |Json format
+5| |mainClass |String |main class
+6| |mainArgs | String|execution arguments
+7| |others | String|other arguments
+8| |mainJar |Object |application jar package
+9| |deployMode |String |deployment mode |local,client,cluster
+10| |slot | String| slot count
+11| |taskManager |String | taskManager count
+12| |taskManagerMemory |String |taskManager memory size
+13| |jobManagerMemory |String | jobManager memory size
+14| |programType | String| program type|JAVA,SCALA,PYTHON
+15| | localParams| Array|local parameters
+16| | resourceList| Array|resource files
+17|description | |String|description | |
+18|runFlag | |String |execution flag| |
+19|conditionResult | |Object|condition branch| |
+20| | successNode| Array|jump node if success| |
+21| | failedNode|Array|jump node if failure| 
+22| dependence| |Object |task dependency |mutual exclusion with params
+23|maxRetryTimes | |String|max retry times| |
+24|retryInterval | |String |retry interval| |
+25|timeout | |Object|timeout | |
+26| taskInstancePriority| |String|task priority| |
+27|workerGroup | |String |Worker group| |
+28|preTasks | |Array|preposition tasks| |
+
+
+**Node data example:**
+
+```bash
+{
+    "type":"FLINK",
+    "id":"tasks-17135",
+    "name":"FlinkTask",
+    "params":{
+        "mainClass":"com.flink.demo",
+        "mainJar":{
+            "id":6
+        },
+        "deployMode":"cluster",
+        "resourceList":[
+            {
+                "id":3,
+                "name":"run.sh",
+                "res":"run.sh"
+            }
+        ],
+        "localParams":[
+
+        ],
+        "slot":1,
+        "taskManager":"2",
+        "jobManagerMemory":"1G",
+        "taskManagerMemory":"2G",
+        "executorCores":2,
+        "mainArgs":"100",
+        "others":"",
+        "programType":"SCALA"
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            ""
+        ],
+        "failedNode":[
+            ""
+        ]
+    },
+    "dependence":{
+
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+
+    ]
+}
+```
+
+## HTTP Node
+**The node data structure is as follows:**
+
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String|task Id|
+2|type ||String |task type|HTTP
+3| name| |String|task name|
+4| params| |Object|customized parameters |Json format
+5| |url |String |request url
+6| |httpMethod | String|http method|GET,POST,HEAD,PUT,DELETE
+7| | httpParams| Array|http parameters
+8| |httpCheckCondition | String|validation of HTTP code status|default code 200
+9| |condition |String |validation conditions
+10| | localParams| Array|customized local parameters
+11|description | |String|description| |
+12|runFlag | |String |execution flag| |
+13|conditionResult | |Object|condition branch| |
+14| | successNode| Array|jump node if success| |
+15| | failedNode|Array|jump node if failure| 
+16| dependence| |Object |task dependency |mutual exclusion with params
+17|maxRetryTimes | |String|max retry times | |
+18|retryInterval | |String |retry interval| |
+19|timeout | |Object|timeout | |
+20| taskInstancePriority| |String|task priority| |
+21|workerGroup | |String |Worker group| |
+22|preTasks | |Array|preposition tasks| |
+
+
+**Node data example:**
+
+```bash
+{
+    "type":"HTTP",
+    "id":"tasks-60499",
+    "name":"HttpTask",
+    "params":{
+        "localParams":[
+
+        ],
+        "httpParams":[
+            {
+                "prop":"id",
+                "httpParametersType":"PARAMETER",
+                "value":"1"
+            },
+            {
+                "prop":"name",
+                "httpParametersType":"PARAMETER",
+                "value":"Bo"
+            }
+        ],
+        "url":"https://www.xxxxx.com:9012",
+        "httpMethod":"POST",
+        "httpCheckCondition":"STATUS_CODE_DEFAULT",
+        "condition":""
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            ""
+        ],
+        "failedNode":[
+            ""
+        ]
+    },
+    "dependence":{
+
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+
+    ]
+}
+```
+
+
+
+## DataX Node
+**The node data structure is as follows:**
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String| task Id|
+2|type ||String |task type|DATAX
+3| name| |String|task name|
+4| params| |Object|customized parameters |Json format
+5| |customConfig |Int |specify whether use customized config| 0 none customized, 1 customized
+6| |dsType |String | datasource type
+7| |dataSource |Int | datasource ID
+8| |dtType | String|target database type
+9| |dataTarget | Int|target database ID 
+10| |sql |String | SQL statements
+11| |targetTable |String |target table
+12| |jobSpeedByte |Int |job speed limiting(bytes)
+13| |jobSpeedRecord | Int|job speed limiting(records)
+14| |preStatements | Array|preposition SQL
+15| | postStatements| Array|postposition SQL
+16| | json| String|customized configs|valid if customConfig=1
+17| | localParams| Array|customized parameters|valid if customConfig=1
+18|description | |String|description| |
+19|runFlag | |String |execution flag| |
+20|conditionResult | |Object|condition branch| |
+21| | successNode| Array|jump node if success| |
+22| | failedNode|Array|jump node if failure| 
+23| dependence| |Object |task dependency |mutual exclusion with params
+24|maxRetryTimes | |String|max retry times| |
+25|retryInterval | |String |retry interval| |
+26|timeout | |Object|timeout | |
+27| taskInstancePriority| |String|task priority| |
+28|workerGroup | |String |Worker group| |
+29|preTasks | |Array|preposition tasks| |
+
+
+
+**Node data example:**
+
+
+```bash
+{
+    "type":"DATAX",
+    "id":"tasks-91196",
+    "name":"DataxTask-DB",
+    "params":{
+        "customConfig":0,
+        "dsType":"MYSQL",
+        "dataSource":1,
+        "dtType":"MYSQL",
+        "dataTarget":1,
+        "sql":"select id, name ,age from user ",
+        "targetTable":"emp",
+        "jobSpeedByte":524288,
+        "jobSpeedRecord":500,
+        "preStatements":[
+            "truncate table emp "
+        ],
+        "postStatements":[
+            "truncate table user"
+        ]
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            ""
+        ],
+        "failedNode":[
+            ""
+        ]
+    },
+    "dependence":{
+
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+
+    ]
+}
+```
+
+## Sqoop Node
+**The node data structure is as follows:**
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String|task ID|
+2|type ||String |task type|SQOOP
+3| name| |String|task name|
+4| params| |Object|customized parameters |Json format
+5| | concurrency| Int|concurrency rate
+6| | modelType|String |flow direction|import,export
+7| |sourceType|String |datasource type|
+8| |sourceParams |String|datasource parameters| JSON format
+9| | targetType|String |target datasource type
+10| |targetParams | String|target datasource parameters|JSON format
+11| |localParams |Array |customized local parameters
+12|description | |String|description| |
+13|runFlag | |String |execution flag| |
+14|conditionResult | |Object|condition branch| |
+15| | successNode| Array|jump node if success| |
+16| | failedNode|Array|jump node if failure| 
+17| dependence| |Object |task dependency |mutual exclusion with params
+18|maxRetryTimes | |String|max retry times| |
+19|retryInterval | |String |retry interval| |
+20|timeout | |Object|timeout | |
+21| taskInstancePriority| |String|task priority| |
+22|workerGroup | |String |Worker group| |
+23|preTasks | |Array|preposition tasks| |
+
+
+
+
+**Node data example:**
+
+```bash
+{
+            "type":"SQOOP",
+            "id":"tasks-82041",
+            "name":"Sqoop Task",
+            "params":{
+                "concurrency":1,
+                "modelType":"import",
+                "sourceType":"MYSQL",
+                "targetType":"HDFS",
+                "sourceParams":"{"srcType":"MYSQL","srcDatasource":1,"srcTable":"","srcQueryType":"1","srcQuerySql":"selec id , name from user","srcColumnType":"0","srcColumns":"","srcConditionList":[],"mapColumnHive":[{"prop":"hivetype-key","direct":"IN","type":"VARCHAR","value":"hivetype-value"}],"mapColumnJava":[{"prop":"javatype-key","direct":"IN","type":"VARCHAR","value":"javatype-value"}]}",
+                "targetParams":"{"targetPath":"/user/hive/warehouse/ods.db/user","deleteTargetDir":false,"fileType":"--as-avrodatafile","compressionCodec":"snappy","fieldsTerminated":",","linesTerminated":"@"}",
+                "localParams":[
+
+                ]
+            },
+            "description":"",
+            "runFlag":"NORMAL",
+            "conditionResult":{
+                "successNode":[
+                    ""
+                ],
+                "failedNode":[
+                    ""
+                ]
+            },
+            "dependence":{
+
+            },
+            "maxRetryTimes":"0",
+            "retryInterval":"1",
+            "timeout":{
+                "strategy":"",
+                "interval":null,
+                "enable":false
+            },
+            "taskInstancePriority":"MEDIUM",
+            "workerGroup":"default",
+            "preTasks":[
+
+            ]
+        }
+```
+
+## Condition Branch Node
+**The node data structure is as follows:**
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String| task ID|
+2|type ||String |task type |CONDITIONS
+3| name| |String|task name |
+4| params| |Object|customized parameters | null
+5|description | |String|description| |
+6|runFlag | |String |execution flag| |
+7|conditionResult | |Object|condition branch | |
+8| | successNode| Array|jump to node if success| |
+9| | failedNode|Array|jump to node if failure| 
+10| dependence| |Object |task dependency |mutual exclusion with params
+11|maxRetryTimes | |String|max retry times | |
+12|retryInterval | |String |retry interval| |
+13|timeout | |Object|timeout | |
+14| taskInstancePriority| |String|task priority | |
+15|workerGroup | |String |Worker group| |
+16|preTasks | |Array|preposition tasks| |
+
+
+**Node data example:**
+
+```json
+{
+    "type":"CONDITIONS",
+    "id":"tasks-96189",
+    "name":"条件",
+    "params":{
+
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            "test04"
+        ],
+        "failedNode":[
+            "test05"
+        ]
+    },
+    "dependence":{
+        "relation":"AND",
+        "dependTaskList":[
+
+        ]
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+        "test01",
+        "test02"
+    ]
+}
+```
+
+
+## Subprocess Node
+**The node data structure is as follows:**
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String| task ID|
+2|type ||String |task type|SUB_PROCESS
+3| name| |String|task name|
+4| params| |Object|customized parameters |Json format
+5| |processDefinitionId |Int| process definition ID
+6|description | |String|description | |
+7|runFlag | |String |execution flag| |
+8|conditionResult | |Object|condition branch | |
+9| | successNode| Array|jump to node if success| |
+10| | failedNode|Array|jump to node if failure| 
+11| dependence| |Object |task dependency |mutual exclusion with params
+12|maxRetryTimes | |String|max retry times| |
+13|retryInterval | |String |retry interval| |
+14|timeout | |Object|timeout| |
+15| taskInstancePriority| |String|task priority| |
+16|workerGroup | |String |Worker group| |
+17|preTasks | |Array|preposition tasks| |
+
+
+**Node data example:**
+
+```json
+{
+            "type":"SUB_PROCESS",
+            "id":"tasks-14806",
+            "name":"SubProcessTask",
+            "params":{
+                "processDefinitionId":2
+            },
+            "description":"",
+            "runFlag":"NORMAL",
+            "conditionResult":{
+                "successNode":[
+                    ""
+                ],
+                "failedNode":[
+                    ""
+                ]
+            },
+            "dependence":{
+
+            },
+            "timeout":{
+                "strategy":"",
+                "interval":null,
+                "enable":false
+            },
+            "taskInstancePriority":"MEDIUM",
+            "workerGroup":"default",
+            "preTasks":[
+
+            ]
+        }
+```
+
+
+
+## DEPENDENT Node
+**The node data structure is as follows:**
+No.|parameter name||type|description |notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String| task ID|
+2|type ||String |task type|DEPENDENT
+3| name| |String|task name|
+4| params| |Object|customized parameters |Json format
+5| |rawScript |String|Shell script|
+6| | localParams| Array|customized local parameters||
+7| | resourceList| Array|resource files||
+8|description | |String|description| |
+9|runFlag | |String |execution flag| |
+10|conditionResult | |Object|condition branch| |
+11| | successNode| Array|jump to node if success| |
+12| | failedNode|Array|jump to node if failure| 
+13| dependence| |Object |task dependency |mutual exclusion with params
+14| | relation|String |relation|AND,OR
+15| | dependTaskList|Array |dependent task list|
+16|maxRetryTimes | |String|max retry times| |
+17|retryInterval | |String |retry interval| |
+18|timeout | |Object|timeout| |
+19| taskInstancePriority| |String|task priority| |
+20|workerGroup | |String |Worker group| |
+21|preTasks | |Array|preposition tasks| |
+
+
+**Node data example:**
+
+```json
+{
+            "type":"DEPENDENT",
+            "id":"tasks-57057",
+            "name":"DenpendentTask",
+            "params":{
+
+            },
+            "description":"",
+            "runFlag":"NORMAL",
+            "conditionResult":{
+                "successNode":[
+                    ""
+                ],
+                "failedNode":[
+                    ""
+                ]
+            },
+            "dependence":{
+                "relation":"AND",
+                "dependTaskList":[
+                    {
+                        "relation":"AND",
+                        "dependItemList":[
+                            {
+                                "projectId":1,
+                                "definitionId":7,
+                                "definitionList":[
+                                    {
+                                        "value":8,
+                                        "label":"MRTask"
+                                    },
+                                    {
+                                        "value":7,
+                                        "label":"FlinkTask"
+                                    },
+                                    {
+                                        "value":6,
+                                        "label":"SparkTask"
+                                    },
+                                    {
+                                        "value":5,
+                                        "label":"SqlTask-Update"
+                                    },
+                                    {
+                                        "value":4,
+                                        "label":"SqlTask-Query"
+                                    },
+                                    {
+                                        "value":3,
+                                        "label":"SubProcessTask"
+                                    },
+                                    {
+                                        "value":2,
+                                        "label":"Python Task"
+                                    },
+                                    {
+                                        "value":1,
+                                        "label":"Shell Task"
+                                    }
+                                ],
+                                "depTasks":"ALL",
+                                "cycle":"day",
+                                "dateValue":"today"
+                            }
+                        ]
+                    },
+                    {
+                        "relation":"AND",
+                        "dependItemList":[
+                            {
+                                "projectId":1,
+                                "definitionId":5,
+                                "definitionList":[
+                                    {
+                                        "value":8,
+                                        "label":"MRTask"
+                                    },
+                                    {
+                                        "value":7,
+                                        "label":"FlinkTask"
+                                    },
+                                    {
+                                        "value":6,
+                                        "label":"SparkTask"
+                                    },
+                                    {
+                                        "value":5,
+                                        "label":"SqlTask-Update"
+                                    },
+                                    {
+                                        "value":4,
+                                        "label":"SqlTask-Query"
+                                    },
+                                    {
+                                        "value":3,
+                                        "label":"SubProcessTask"
+                                    },
+                                    {
+                                        "value":2,
+                                        "label":"Python Task"
+                                    },
+                                    {
+                                        "value":1,
+                                        "label":"Shell Task"
+                                    }
+                                ],
+                                "depTasks":"SqlTask-Update",
+                                "cycle":"day",
+                                "dateValue":"today"
+                            }
+                        ]
+                    }
+                ]
+            },
+            "maxRetryTimes":"0",
+            "retryInterval":"1",
+            "timeout":{
+                "strategy":"",
+                "interval":null,
+                "enable":false
+            },
+            "taskInstancePriority":"MEDIUM",
+            "workerGroup":"default",
+            "preTasks":[
+
+            ]
+        }
+```
diff --git a/docs/2.0.9/docs/en/contribute/api-standard.md b/docs/2.0.9/docs/en/contribute/api-standard.md
new file mode 100644
index 0000000..61d6622
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/api-standard.md
@@ -0,0 +1,108 @@
+# API design standard
+A standardized and unified API is the cornerstone of project design. The DolphinScheduler API follows the RESTful standard. RESTful is currently the most popular style of Internet software architecture: it has a clear structure, conforms to standards, and is easy to understand and extend.
+
+This article uses the DolphinScheduler API as an example to explain how to construct a Restful API.
+
+## 1. URI design
+REST is "Representational State Transfer".The design of Restful URI is based on resources.The resource corresponds to an entity on the network, for example: a piece of text, a picture, and a service. And each resource corresponds to a URI.
+
++ A kind of resource: expressed in the plural, such as `task-instances`, `groups`;
++ A single resource: expressed in the singular, or identified by its ID, such as `group`, `groups/{groupId}`;
++ Sub-resources: resources under a certain resource, such as `/instances/{instanceId}/tasks`;
++ A single sub-resource: `/instances/{instanceId}/tasks/{taskId}`.
+
+## 2. Method design
+We locate a resource by its URI, and then use the HTTP method, or an action declared in the path suffix, to express the operation on that resource.
+
+### ① Query - GET
+Use URI to locate the resource, and use GET to indicate query.
+
++ When the URI refers to a kind of resource, it means querying that kind of resource. For example, the following request is a paginated query of `alert-groups`.
+```
+Method: GET
+/dolphinscheduler/alert-groups
+```
+
++ When the URI refers to a single resource, it means querying that resource. For example, the following request queries the specified `alert-group`.
+```
+Method: GET
+/dolphinscheduler/alert-groups/{id}
+```
+
++ In addition, we can also query sub-resources through the URI, as follows:
+```
+Method: GET
+/dolphinscheduler/projects/{projectId}/tasks
+```
+
+**The above examples all represent paginated queries. If we need to query all data, add `/list` after the URI to distinguish it. Do not mix paginated queries and full-list queries in the same API.**
+```
+Method: GET
+/dolphinscheduler/alert-groups/list
+```
+
+### ② Create - POST
+Use the URI to locate the resource, use POST to indicate creation, and return the created id to the requester.
+
++ create an `alert-group`:
+
+```
+Method: POST
+/dolphinscheduler/alert-groups
+```
+
++ Creating a sub-resource works the same way:
+```
+Method: POST
+/dolphinscheduler/alert-groups/{alertGroupId}/tasks
+```
+
+### ③ Modify - PUT
+Use the URI to locate the resource and use PUT to indicate modification.
++ modify an `alert-group`:
+```
+Method: PUT
+/dolphinscheduler/alert-groups/{alertGroupId}
+```
+
+### ④ Delete - DELETE
+Use the URI to locate the resource and use DELETE to indicate deletion.
+
++ delete an `alert-group`:
+```
+Method: DELETE
+/dolphinscheduler/alert-groups/{alertGroupId}
+```
+
++ Batch deletion: to delete an array of ids in one request, use POST. **(Do not use the DELETE method, because the body of a DELETE request has no semantic meaning, and some gateways, proxies and firewalls may strip the request body off a DELETE request.)**
+```
+Method: POST
+/dolphinscheduler/alert-groups/batch-delete
+```
+
+### ⑤ Partial Modification - PATCH
+Use the URI to locate the resource and use PATCH to apply partial modifications.
+
+```
+Method: PATCH
+/dolphinscheduler/alert-groups/{alertGroupId}
+```
+
+### ⑥ Others
+In addition to creating, deleting, modifying and querying, we can also locate the corresponding resource through the URI and append an action to the path, such as:
+```
+/dolphinscheduler/alert-groups/verify-name
+/dolphinscheduler/projects/{projectCode}/process-instances/{code}/view-gantt
+```
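+
+Putting the conventions above together, a resource controller might look like the following sketch. It is a hypothetical Spring-style example, not the actual DolphinScheduler controller; class names and payload types are made up for illustration.
+
+```java
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import org.springframework.web.bind.annotation.*;
+
+@RestController
+@RequestMapping("/dolphinscheduler/alert-groups")
+public class AlertGroupController {
+
+    // ② Create - POST on the plural resource; return the created id to the requester
+    @PostMapping
+    public long create(@RequestBody Map<String, Object> alertGroup) {
+        return 1L; // stub: persist and return the generated id
+    }
+
+    // ① Query - GET on the plural resource means a paginated query
+    @GetMapping
+    public List<Map<String, Object>> page(@RequestParam int pageNo, @RequestParam int pageSize) {
+        return Collections.emptyList(); // stub: return one page of alert groups
+    }
+
+    // ① Query - "/list" is a separate API that returns all data, never mixed with the paginated one
+    @GetMapping("/list")
+    public List<Map<String, Object>> list() {
+        return Collections.emptyList(); // stub: return every alert group
+    }
+
+    // ③ Modify - PUT on a single resource located by its id
+    @PutMapping("/{id}")
+    public void update(@PathVariable long id, @RequestBody Map<String, Object> alertGroup) {
+        // stub: update the resource
+    }
+
+    // ④ Delete - a single resource uses DELETE; batch deletion uses POST (see the note above)
+    @DeleteMapping("/{id}")
+    public void delete(@PathVariable long id) {
+        // stub: delete one resource
+    }
+
+    @PostMapping("/batch-delete")
+    public void batchDelete(@RequestBody List<Long> ids) {
+        // stub: delete all ids in one request
+    }
+}
+```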
+
+## 3. Parameter design
+There are two types of parameters: request parameters and path parameters. Parameter names must use lower camelCase.
+
+For paginated queries, if the page number entered by the user is less than 1, the front end should automatically change it to 1 to request the first page; if the backend finds that the page number is greater than the total number of pages, it should directly return the last page.
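+
+A minimal sketch of this rule (a hypothetical helper, not part of the DolphinScheduler code base):
+
+```java
+public class PageNoUtil {
+    // Clamp the requested page number into [1, totalPages] as described above.
+    public static int normalize(int requestedPageNo, int totalPages) {
+        if (requestedPageNo < 1) {
+            return 1; // anything below 1 means "first page"
+        }
+        return Math.min(requestedPageNo, Math.max(totalPages, 1)); // beyond the end -> last page
+    }
+}
+```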
+
+## 4. Other design conventions
+### Base URL
+The URIs of the project need to use `/<project_name>` as the base path, so that these APIs can be identified as belonging to this project.
+```
+/dolphinscheduler
+```
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/contribute/architecture-design.md b/docs/2.0.9/docs/en/contribute/architecture-design.md
new file mode 100644
index 0000000..a46bfb2
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/architecture-design.md
@@ -0,0 +1,315 @@
+## Architecture Design
+Before explaining the architecture of the scheduling system, let us first understand the common terms used in it.
+
+### 1. Terminology
+
+**DAG:** Full name Directed Acyclic Graph, referred to as DAG. Tasks in the workflow are assembled in the form of a directed acyclic graph, which is topologically traversed from the nodes with zero in-degree until there are no successor nodes. For example, the following picture:
+
+<p align="center">
+  <img src="../../../img/architecture-design/dag_examples.png" alt="dag示例"  width="80%" />
+  <p align="center">
+        <em>dag example</em>
+  </p>
+</p>
+
+**Process definition**: A visual **DAG** assembled by dragging task nodes and establishing associations between them
+
+**Process instance**: A process instance is an instantiation of a process definition, which can be generated by manual start or by scheduling. Each run of a process definition generates a new process instance
+
+**Task instance**: A task instance is the instantiation of a specific task node when a process instance runs, which indicates the specific task execution status
+
+**Task type**: Currently supports SHELL, SQL, SUB_PROCESS (sub-process), PROCEDURE, MR, SPARK, PYTHON and DEPENDENT (dependency), with plans to support dynamic plug-in extension. Note: a **SUB_PROCESS** is itself a separate process definition that can be launched on its own
+
+**Schedule mode**: The system supports cron-based scheduled execution and manual execution. Supported command types: start workflow, start execution from the current node, resume fault-tolerant workflow, resume paused process, start execution from the failed node, complement (backfill), timer, rerun, pause, stop, and resume waiting thread. The two command types **resume fault-tolerant workflow** and **resume waiting thread** are used internally by the scheduler and cannot be called externally
+
+**Timed schedule**: The system uses the **Quartz** distributed scheduler and supports visual generation of cron expressions
+
+**Dependency**: The system not only supports simple **DAG** dependencies between predecessor and successor nodes, but also provides **task dependency** nodes, supporting **custom task dependencies between processes**
+
+**Priority**: Supports the priority of process instances and task instances. If the process instance and task instance priority are not set, the default is first in, first out.
+
+**Mail Alert**: Supports sending **SQL task** query results by email, as well as email alerts for process instance results and fault-tolerance events
+
+**Failure policy**: For tasks running in parallel, two failure policies are provided when a task fails. **Continue** means the remaining parallel tasks keep running until the whole process finishes and is marked failed. **End** means that once a failed task is found, the running parallel tasks are killed and the process ends.
+
+**Complement**: Backfills historical data, supporting both **parallel and serial** complement over a date interval
+
+
+
+### 2. System architecture
+
+#### 2.1 System Architecture Diagram
+<p align="center">
+  <img src="../../../img/architecture.jpg" alt="System Architecture Diagram"  />
+  <p align="center">
+        <em>System Architecture Diagram</em>
+  </p>
+</p>
+
+
+
+#### 2.2 Architectural description
+
+* **MasterServer** 
+
+    MasterServer adopts the distributed non-central design concept. MasterServer is mainly responsible for DAG task split, task submission monitoring, and monitoring the health status of other MasterServer and WorkerServer.
+    When the MasterServer service starts, it registers a temporary node with Zookeeper, and listens to the Zookeeper temporary node state change for fault tolerance processing.
+
+    
+
+    ##### The service mainly contains:
+
+    - **Distributed Quartz** distributed scheduling component, mainly responsible for starting and stopping scheduled tasks. When Quartz picks up a task, an internal thread pool in the Master handles the subsequent processing of the task.
+
+    - **MasterSchedulerThread** is a scan thread that periodically scans the **command** table in the database for different business operations based on different **command types**
+
+    - **MasterExecThread** is mainly responsible for DAG task segmentation, task submission monitoring, logic processing of various command types
+
+    - **MasterTaskExecThread** is mainly responsible for task persistence
+
+      
+
+* **WorkerServer** 
+
+     - WorkerServer also adopts a distributed, non-central design concept. WorkerServer is mainly responsible for task execution and providing log services. When the WorkerServer service starts, it registers the temporary node with Zookeeper and maintains the heartbeat.
+
+       ##### This service contains:
+
+       - **FetchTaskThread** is mainly responsible for continuously receiving tasks from **Task Queue** and calling **TaskScheduleThread** corresponding executors according to different task types.
+
+     - **ZooKeeper**
+
+       The ZooKeeper service, the MasterServer and the WorkerServer nodes in the system all use the ZooKeeper for cluster management and fault tolerance. In addition, the system also performs event monitoring and distributed locking based on ZooKeeper.
+       We have also implemented queues based on Redis, but we hope that DolphinScheduler relies on as few components as possible, so we finally removed the Redis implementation.
+
+     - **Task Queue**
+
+       The task queue operation is provided. Currently the queue is also implemented based on ZooKeeper. Since little information is stored in the queue, there is no need to worry about too much data in it; in fact, we have stress-tested the queue with millions of entries, which had no effect on system stability or performance.
+
+     - **Alert**
+
+       Provides alarm-related interfaces, mainly covering the storage, query and notification of the two types of alarm data. The notification function comes in two forms: **mail notification** and **SNMP (not yet implemented)**.
+
+     - **API**
+
+       The API interface layer is mainly responsible for processing requests from the front-end UI layer. The service provides a RESTful api to provide request services externally.
+       Interfaces include workflow creation, definition, query, modification, release, offline, manual start, stop, pause, resume, start execution from this node, and more.
+
+     - **UI**
+
+       The front-end page of the system provides various visual operation interfaces of the system. For details, see the [quick start](https://dolphinscheduler.apache.org/en-us/docs/latest/user_doc/about/introduction.html) section.
+
+     
+
+#### 2.3 Architectural Design Ideas
+
+##### I. Decentralized vs. centralized
+
+###### Centralization
+
+The centralized design concept is relatively simple. The nodes in a distributed cluster are divided into two roles:
+
+<p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_slave.png" alt="master-slave role" width="50%" />
+ </p>
+
+- The Master is mainly responsible for task distribution and supervising the health of the Slaves, and can dynamically balance tasks across Slaves so that no Slave node is overly "busy" or left "idle".
+- The Slave (Worker) is mainly responsible for executing tasks and maintaining a heartbeat with the Master, so that the Master can assign tasks to it.
+
+Problems with the centralized design:
+
+- Once the Master has a problem, the cluster is leaderless and will collapse as a whole. To solve this, most Master/Slave architectures adopt an active/standby Master design, which can be hot or cold standby, with automatic or manual switching; more and more new systems can automatically elect and switch the Master to improve availability.
+- Another problem is scheduler placement: if the Scheduler runs on the Master, although different tasks of one DAG can run on different machines, the Master may become overloaded; if the Scheduler runs on the Slave, all tasks of a DAG can only be submitted from one machine, and with many parallel tasks the pressure on that Slave may be high.
+
+###### Decentralization
+
+ <p align="center"
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="decentralized" width="50%" />
+ </p>
+
+- In a decentralized design, there is usually no Master/Slave concept: all roles are the same and have equal status. The global Internet is a typical decentralized distributed system, where any node going down only affects a small range of features.
+- The core of a decentralized design is that there is no "manager" node different from the other nodes in the distributed system, so there is no single point of failure. However, since there is no "manager", each node needs to communicate with other nodes to obtain the necessary machine information, and the unreliability of distributed communication greatly increases the difficulty of implementing the functions above.
+- In fact, truly decentralized distributed systems are rare. Instead, dynamically centralized distributed systems keep emerging: in such an architecture the cluster manager is dynamically elected rather than preset, and when the manager fails, the cluster nodes spontaneously hold a "meeting" to elect a new "manager" to preside over the work. The most typical cases are ZooKeeper and Etcd (implemented in Go).
+
+- DolphinScheduler's decentralization registers the Masters and Workers in ZooKeeper. The Master cluster and the Worker cluster have no center, and a ZooKeeper distributed lock is used to elect one Master or Worker as the "manager" to perform the task.
+
+##### II. Distributed lock practice
+
+DolphinScheduler uses ZooKeeper distributed locks to implement only one Master to execute the Scheduler at the same time, or only one Worker to perform task submission.
+
+1. The core process algorithm for obtaining distributed locks is as follows
+
+ <p align="center">
+   <img src="../../../img/architecture-design/distributed_lock.png" alt="Get Distributed Lock Process" width="70%" />
+ </p>
+
+2. Scheduler thread distributed lock implementation flow chart in DolphinScheduler:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/distributed_lock_procss.png" alt="Get Distributed Lock Process" />
+ </p>
+
+##### III. Insufficient-thread loop-waiting problem
+
+- If a DAG has no sub-process, and the number of Commands exceeds the threshold set by the thread pool, the process waits or fails directly.
+- If a large DAG nests many sub-processes, the situation in the following figure results in a "dead" state:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/lack_thread.png" alt="Thread is not enough to wait for loop" width="70%" />
+ </p>
+
+In the figure above, MainFlowThread waits for SubFlowThread1 to end, SubFlowThread1 waits for SubFlowThread2 to end, SubFlowThread2 waits for SubFlowThread3 to end, and SubFlowThread3 waits for a new thread from the thread pool, so the whole DAG process can never end and its threads can never be released. This forms a parent-child process loop-waiting state. At this point the scheduling cluster is no longer usable unless a new Master is started to add threads and break the deadlock.
+
+It seems a bit unsatisfactory to start a new Master to break the deadlock, so we proposed the following three options to reduce this risk:
+
+1. Calculate the total number of threads of all Masters, and then calculate the number of threads required by each DAG, that is, pre-calculate before the DAG process is executed. Because the thread pools are spread across multiple Masters, the total number of threads is unlikely to be obtained in real time.
+2. Check the thread pool of a single Master; if the pool is full, let the thread fail directly.
+3. Add an "insufficient resources" Command type: if the thread pool is insufficient, the main process is suspended, and it is woken up again once the thread pool has a free thread. This lets a process that lacked resources hang and resume later.
+
+Note: The Master Scheduler thread fetches Commands in FIFO order.
+
+So we chose the third way to solve the problem of insufficient threads.
+
+##### IV. Fault Tolerant Design
+
+Fault tolerance is divided into service fault tolerance and task retry. Service fault tolerance is divided into two types: Master Fault Tolerance and Worker Fault Tolerance.
+
+###### 1. Downtime fault tolerance
+
+Service fault tolerance design relies on ZooKeeper's Watcher mechanism. The implementation principle is as follows:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/fault-tolerant.png" alt="DolphinScheduler Fault Tolerant Design" width="70%" />
+ </p>
+
+The Master monitors the directories of other Masters and Workers. If the remove event is detected, the process instance is fault-tolerant or the task instance is fault-tolerant according to the specific business logic.
+
+
+
+- Master fault tolerance flow chart:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/fault-tolerant_master.png" alt="Master Fault Tolerance Flowchart" width="70%" />
+ </p>
+
+After a Master failure is detected through ZooKeeper, the affected process instances are rescheduled by the Scheduler thread in DolphinScheduler. It traverses the DAG to find the "running" and "submitted successfully" tasks: for "running" tasks it monitors the status of their task instances, and for "submitted successfully" tasks it determines whether the task already exists in the Task Queue; if it exists, the task instance status is monitored, otherwise the task instance is resubmitted.
+
+
+
+- Worker fault tolerance flow chart:
+
+ <p align="center">
+   <img src="../../../img/architecture-design/fault-tolerant_worker.png" alt="Worker Fault Tolerance Flowchart" width="70%" />
+ </p>
+
+Once the Master Scheduler thread finds a task instance marked "needs fault tolerance", it takes over the task and resubmits it.
+
+ Note: Because the "network jitter" may cause the node to lose the heartbeat of ZooKeeper in a short time, the node's remove event occurs. In this case, we use the easiest way, that is, once the node has timeout connection with ZooKeeper, it will directly stop the Master or Worker service.
+
+###### 2. Task failure retry
+
+Here we must first distinguish between the concept of task failure retry, process failure recovery, and process failure rerun:
+
+- Task failure retry is at the task level and is performed automatically by the scheduling system. For example, if a Shell task sets the number of retries to 3, the Shell task is retried up to 3 times after a failed run.
+- Process failure recovery is at the process level and is done manually; recovery can only be performed **from the failed node** or **from the current node**.
+- Process failure rerun is also at the process level and is done manually; the rerun starts from the start node.
+
+
+
+Back to the topic: we divide the task nodes in a workflow into two types.
+
+- One is a business node, which corresponds to an actual script or processing statement, such as a Shell node, an MR node, a Spark node, a dependent node, and so on.
+- The other is a logical node, which does not run an actual script or statement but handles the logical processing of the overall process flow, such as sub-process nodes.
+
+Each **business node** can be configured with a number of failure retries. When the task node fails, it automatically retries until it succeeds or exceeds the configured number of retries. A **logical node** does not support failure retry, but the tasks inside a logical node do support retry.
+
+If a task in the workflow fails and reaches its maximum number of retries, the workflow fails and stops; the failed workflow can then be manually rerun or resumed.
+
+
+
+##### V. Task priority design
+
+In the early scheduling design, without a priority design and a fair-scheduling design, a task submitted first might complete at the same time as one submitted later, and the priority of a process or task could not be set. We have redesigned this, and the current design is as follows:
+
+- Tasks are processed in order of **process instance priority**, then **task priority within the same process instance**, then **submission order within the same process**, from high to low.
+
+  - Concretely, the priority is resolved from the JSON of the task instance, and the **process instance priority_process instance id_task priority_task id** information is saved in the ZooKeeper task queue; when fetching from the task queue, a simple string comparison yields the task that should be executed first (see the sketch after this list).
+
+    - The priority of a process definition means that some processes need to be handled before others. It can be configured when the process is started or scheduled. There are 5 levels, in order: HIGHEST, HIGH, MEDIUM, LOW and LOWEST. As shown below
+
+      <p align="center">
+         <img src="../../../img/architecture-design/process_priority.png" alt="Process Priority Configuration" width="40%" />
+       </p>
+
+    - The priority of a task is also divided into 5 levels, in order: HIGHEST, HIGH, MEDIUM, LOW and LOWEST. As shown below
+
+      <p align="center">`
+         <img src="../../../img/architecture-design/task_priority.png" alt="task priority configuration" width="35%" />
+       </p>
+
+##### VI. Logback and gRPC implement log access
+
+- Since the Web (UI) and the Worker are not necessarily on the same machine, viewing logs is not as simple as reading local files. There are two options:
+  - Put the logs on the ES search engine
+  - Obtain remote log information through gRPC communication
+- To keep DolphinScheduler as lightweight as possible, gRPC was chosen to implement remote access to log information.
+
+ <p align="center">
+   <img src="../../../img/architecture-design/grpc.png" alt="grpc remote access" width="50%" />
+ </p>
+
+- We use a custom Logback FileAppender and Filter function to generate a log file for each task instance.
+- The main implementation of FileAppender is as follows:
+
+```java
+ /**
+  * task log appender
+  */
+ public class TaskLogAppender extends FileAppender<ILoggingEvent> {
+ 
+     ...
+
+    @Override
+    protected void append(ILoggingEvent event) {
+
+        if (currentlyActiveFile == null) {
+            currentlyActiveFile = getFile();
+        }
+        String activeFile = currentlyActiveFile;
+        // thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId
+        String threadName = event.getThreadName();
+        String[] threadNameArr = threadName.split("-");
+        // logId = processDefineId_processInstanceId_taskInstanceId
+        String logId = threadNameArr[1];
+        ...
+        super.subAppend(event);
+    }
+}
+```
+
+A log file is generated for each task instance in the form of `/process definition id/process instance id/task instance id.log`
+
+- Filter matches the thread name starting with TaskLogInfo:
+- TaskLogFilter is implemented as follows:
+
+```java
+ /**
+ * task log filter
+ */
+public class TaskLogFilter extends Filter<ILoggingEvent> {
+
+    @Override
+    public FilterReply decide(ILoggingEvent event) {
+        // accept only logging events produced by task threads (thread name prefix "TaskLogInfo-")
+        if (event.getThreadName().startsWith("TaskLogInfo-")) {
+            return FilterReply.ACCEPT;
+        }
+        return FilterReply.DENY;
+    }
+}
+```
+
+
+
+### summary
+
+Starting from scheduling, this article has introduced the architecture principles and implementation ideas of DolphinScheduler, a distributed workflow scheduling system for big data. To be continued.
diff --git a/docs/2.0.9/docs/en/contribute/backend/mechanism/global-parameter.md b/docs/2.0.9/docs/en/contribute/backend/mechanism/global-parameter.md
new file mode 100644
index 0000000..53b7374
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/backend/mechanism/global-parameter.md
@@ -0,0 +1,61 @@
+# Global Parameter development document
+
+After the user defines the parameter with the direction OUT, it is saved in the localParam of the task.
+
+## Usage of parameters
+
+Get the direct predecessor nodes `preTasks` of the `taskInstance` to be created from the DAG, take the `varPool` of each of the `preTasks`, and merge these varPools (List) into one `varPool`. During the merge, parameters with the same name are handled according to the following logic:
+
+* If all the values are null, the merged value is null
+* If one and only one value is non-null, then the merged value is the non-null value
+* If all the values are non-null, the value from the varPool of the taskInstance with the earliest end time is taken.
+
+The direction of all the merged properties is updated to IN during the merge process.
+
+The result of the merge is saved in taskInstance.varPool.
+
+The worker receives and parses the varPool into the format of `Map<String,Property>`, where the key of the map is property.prop, which is the parameter name.
+
+When the processor processes the parameters, it merges the varPool, localParam and globalParam parameters. If there are parameters with duplicate names during the merge, they are replaced according to the following priorities, with the higher priority retained and the lower priority replaced:
+
+* globalParam: high
+* varPool: middle
+* localParam: low
+
+Before the node content is executed, occurrences of `${parameter name}` are replaced with the corresponding values using regular expressions.
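+
+A minimal sketch of this replacement priority, using plain strings instead of the real Property type (illustration only):
+
+```java
+import java.util.HashMap;
+import java.util.Map;
+
+public class ParamMergeSketch {
+    // Lower-priority maps are inserted first so that higher-priority maps overwrite duplicate names.
+    static Map<String, String> merge(Map<String, String> globalParam,
+                                     Map<String, String> varPool,
+                                     Map<String, String> localParam) {
+        Map<String, String> merged = new HashMap<>(localParam); // low
+        merged.putAll(varPool);                                  // middle overwrites low
+        merged.putAll(globalParam);                              // high overwrites the rest
+        return merged;
+    }
+}
+```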
+
+## Parameter setting
+
+Currently, only SQL and SHELL nodes are supported to get parameters.
+
+Get the parameters whose direction is OUT from localParam, and process them as follows according to the node type.
+
+### SQL node
+
+The structure returned is `List<Map<String,String>>`, where each element of the List is a row of data, the key of the Map is the column name, and the value is the value of that column.
+
+* If the SQL statement returns one row of data, the column names are matched against the OUT parameter names defined by the user when defining the task; columns that do not match are discarded.
+* If the SQL statement returns multiple rows of data, the column names are matched based on the OUT parameter names defined by the user when defining the task of type LIST. All rows of the corresponding column are converted to `List<String>` as the value of this parameter. If there is no match, it is discarded.
+
+### SHELL node
+
+The result of the processor execution is returned as `Map<String,String>`.
+
+The user needs to define `${setValue(key=value)}` in the output when defining the shell script.
+
+Remove `${setValue()}` when processing parameters, split by "=", with the 0th being the key and the 1st being the value.
+
+Similarly, the key is matched against the OUT parameter names defined by the user when defining the task, and the value is used as that parameter's value.
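+
+A rough sketch of the `${setValue(key=value)}` extraction described above (illustration only; the real worker code differs):
+
+```java
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+public class SetValueParserSketch {
+    private static final Pattern SET_VALUE = Pattern.compile("\\$\\{setValue\\(([^)]+)\\)}");
+
+    static Map<String, String> parse(String shellOutput) {
+        Map<String, String> result = new HashMap<>();
+        Matcher matcher = SET_VALUE.matcher(shellOutput);
+        while (matcher.find()) {
+            // group(1) is "key=value"; index 0 is the key, index 1 is the value
+            String[] kv = matcher.group(1).split("=", 2);
+            if (kv.length == 2) {
+                result.put(kv[0], kv[1]); // e.g. ${setValue(output=hello)} -> {output=hello}
+            }
+        }
+        return result;
+    }
+}
+```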
+
+Return parameter processing
+
+* The result of acquired Processor is String.
+* Determine whether the processor is empty or not, and exit if it is empty.
+* Determine whether the localParam is empty or not, and exit if it is empty.
+* Get the parameter of localParam which is OUT, and exit if it is empty.
+* Format the String according to the formats above (`List<Map<String,String>>` for SQL, `Map<String,String>` for shell).
+
+Assign the matched parameters and their values to varPool (a List, which also contains the original IN parameters).
+
+* Format the varPool as json and pass it to master.
+* The parameters that are OUT would be written into the localParam after the master has received the varPool.
diff --git a/docs/2.0.9/docs/en/contribute/backend/mechanism/overview.md b/docs/2.0.9/docs/en/contribute/backend/mechanism/overview.md
new file mode 100644
index 0000000..4f0d592
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/backend/mechanism/overview.md
@@ -0,0 +1,6 @@
+# Overview
+
+<!-- TODO Since the side menu does not support multiple levels, add new page to keep all sub page here -->
+
+* [Global Parameter](global-parameter.md)
+* [Switch Task type](task/switch.md)
diff --git a/docs/2.0.9/docs/en/contribute/backend/mechanism/task/switch.md b/docs/2.0.9/docs/en/contribute/backend/mechanism/task/switch.md
new file mode 100644
index 0000000..4905104
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/backend/mechanism/task/switch.md
@@ -0,0 +1,8 @@
+# SWITCH Task development
+
+The switch task works as follows:
+
+* User-defined expressions and branch information are stored in `taskParams` in `taskdefinition`. When the switch is executed, it will be formatted as `SwitchParameters`
+* `SwitchTaskExecThread` processes the expressions defined in the `switch` from top to bottom, obtains the values of the variables from `varPool`, and evaluates each expression through `javascript`. If an expression returns true, it stops checking and records the position of that expression, referred to here as `resultConditionLocation`; the work of `SwitchTaskExecThread` is then done (a rough illustration follows this list).
+* After the `switch` task runs, if there is no error (most commonly a user-defined expression that is out of specification or a problem with a parameter name), `MasterExecThread.submitPostNode` obtains the downstream nodes of the `DAG` to continue execution.
+* If `DagHelper.parsePostNodes` finds that the current node (the node that has just completed its work) is a `switch` node, the `resultConditionLocation` is obtained, and all branches in the `SwitchParameters` except `resultConditionLocation` are skipped. In this way, only the branch that needs to be executed is left.
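+
+The branch-selection step can be illustrated with the rough sketch below. It is not the actual `SwitchTaskExecThread` code; it only shows the idea of evaluating the expressions in order against the `varPool` values with the JDK's built-in JavaScript engine (Nashorn on JDK 8).
+
+```java
+import java.util.List;
+import java.util.Map;
+
+import javax.script.ScriptEngine;
+import javax.script.ScriptEngineManager;
+
+public class SwitchBranchSketch {
+    // Returns the index of the first expression that evaluates to true
+    // (the "resultConditionLocation" described above), or -1 if none match.
+    public static int resolveBranch(List<String> expressions, Map<String, Object> varPool) throws Exception {
+        ScriptEngine js = new ScriptEngineManager().getEngineByName("javascript");
+        for (Map.Entry<String, Object> entry : varPool.entrySet()) {
+            js.put(entry.getKey(), entry.getValue()); // expose varPool variables to the expressions
+        }
+        for (int i = 0; i < expressions.size(); i++) {
+            Object result = js.eval(expressions.get(i));
+            if (Boolean.TRUE.equals(result)) {
+                return i; // stop at the first expression that returns true
+            }
+        }
+        return -1; // no branch matched
+    }
+}
+```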
diff --git a/docs/2.0.9/docs/en/contribute/backend/spi/alert.md b/docs/2.0.9/docs/en/contribute/backend/spi/alert.md
new file mode 100644
index 0000000..e2629a8
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/backend/spi/alert.md
@@ -0,0 +1,101 @@
+### DolphinScheduler Alert SPI main design
+
+#### DolphinScheduler SPI Design
+
+DolphinScheduler is undergoing a change to a microkernel + plug-in architecture. All core capabilities, such as tasks, resource storage and registration centers, will be designed as extension points. We hope to use SPI to improve DolphinScheduler's flexibility, friendliness and extensibility.
+
+For alert-related code, please refer to the `dolphinscheduler-alert-api` module. This module defines the extension interface of the alert plug-in and some basic code. When you need to turn a related function into a plug-in, it is recommended to read the code of this module first. Of course, reading the documentation will save a lot of time, but documentation lags behind to some degree; when documentation is missing, it is recommended to take the source code as the standard (and if you are interested, you are welcome to submit related documentation). In addition, we will hardly change the extension interfaces (excluding new additions) unless there is a major structural adjustment with an incompatible upgrade, so existing documentation can generally be relied on.
+
+We use native Java SPI. When you need to extend, you only need to pay attention to implementing the `org.apache.dolphinscheduler.alert.api.AlertChannelFactory` interface; the underlying logic such as plug-in loading has already been implemented by the kernel, which makes development more focused and simple.
+
+By the way, we have adopted an excellent front-end component, form-create, which supports generating front-end UI components from JSON. If plug-in development involves the front end, we use JSON to generate the related UI components: the plug-in parameters are encapsulated in `org.apache.dolphinscheduler.spi.params`, which converts all the relevant parameters into the corresponding JSON. This means you can complete the drawing of the front-end components (mainly forms; we only care about the data exchanged between the front and back ends) purely in Java code.
+
+This article mainly focuses on the design and development of Alert.
+
+#### Main Modules
+
+If you don't care about its internal design, but simply want to know how to develop your own alarm plug-in, you can skip this content.
+
+* dolphinscheduler-alert-api
+
+  This module is the core module of the Alert SPI. It defines the alert plug-in extension interface and some basic code. An extension plug-in must implement the interface defined by this module: `org.apache.dolphinscheduler.alert.api.AlertChannelFactory`
+
+* dolphinscheduler-alert-plugins
+
+  This module contains the plug-ins we currently provide; dozens of plug-ins are already supported, such as Email, DingTalk, Script, etc.
+
+
+#### Alert SPI main class information
+
+* `AlertChannelFactory`
+  The alert plug-in factory interface. All alert plug-ins need to implement this interface. It defines the name of the alert plug-in and the parameters it requires, and its create method is used to create a concrete alert plug-in instance.
+
+* `AlertChannel`
+  The interface of the alert plug-in itself. There is only one method, process, in this interface. The upper-level alert system calls this method and obtains the alert's return information through the AlertResult it returns.
+
+* `AlertData`
+  The alert content, including id, title, content and log.
+
+* `AlertInfo`
+  Alert-related information. When the upper-level system calls an alert plug-in instance, an instance of this class is passed to the plug-in through the process method. It contains the alert content (AlertData) and the parameter values filled in on the front end for the called alert plug-in instance.
+
+* `AlertResult`
+  The return information of the alert sent by the plug-in.
+
+* `org.apache.dolphinscheduler.spi.params`
+  This package contains the plug-in parameter definitions. Our front end uses the form-create library (http://www.form-create.com), which can dynamically generate front-end UI components based on the parameter list JSON returned by the plug-in definition, so we do not need to care about the front end when developing SPI plug-ins.
+
+  Under this package, we currently only encapsulate RadioParam, TextParam and PasswordParam, which are used to define radio parameters, text parameters and password parameters, respectively.
+
+  AbsPluginParams is the base class of all these parameters; classes such as RadioParam inherit from it. Each DolphinScheduler alert plug-in returns a list of AbsPluginParams in its implementation of AlertChannelFactory.
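+
+As a rough illustration, a custom alert plug-in built against these interfaces might look like the sketch below. This is not an existing DolphinScheduler plug-in: the class names are hypothetical, and the exact package paths, constructors and getters (for example of `AlertResult` and `AlertInfo`) may differ slightly between versions, so treat it as a sketch of the shape rather than copy-paste code.
+
+```java
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.dolphinscheduler.alert.api.AlertChannel;
+import org.apache.dolphinscheduler.alert.api.AlertChannelFactory;
+import org.apache.dolphinscheduler.alert.api.AlertData;
+import org.apache.dolphinscheduler.alert.api.AlertInfo;
+import org.apache.dolphinscheduler.alert.api.AlertResult;
+import org.apache.dolphinscheduler.spi.params.base.PluginParams; // assumed package path
+
+public class MyAlertChannelFactory implements AlertChannelFactory {
+
+    @Override
+    public String name() {
+        return "MyAlert"; // the plug-in name shown in the UI
+    }
+
+    @Override
+    public List<PluginParams> params() {
+        // build TextParam / RadioParam / PasswordParam here to describe the fields your plug-in needs
+        return Collections.emptyList();
+    }
+
+    @Override
+    public AlertChannel create() {
+        return new MyAlertChannel();
+    }
+
+    static class MyAlertChannel implements AlertChannel {
+        @Override
+        public AlertResult process(AlertInfo alertInfo) {
+            AlertData data = alertInfo.getAlertData(); // assumed getter for the alert content
+            // send data.getTitle() / data.getContent() to your alerting system here,
+            // using the parameter values the user filled in (also carried by alertInfo)
+            AlertResult result = new AlertResult();    // assumed no-arg constructor and setters
+            result.setStatus("true");
+            result.setMessage("alert sent: " + data.getTitle());
+            return result;
+        }
+    }
+}
+```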
+
+The specific design of alert_spi can be seen in the issue: [Alert Plugin Design](https://github.com/apache/incubator-dolphinscheduler/issues/3049)
+
+#### Alert SPI built-in implementation
+
+* Email
+
+     Email alert notification
+
+* DingTalk
+
+     Alert for DingTalk group chat bots
+  
+     Related parameter configuration can refer to the DingTalk robot document.
+
+* EnterpriseWeChat
+
+     EnterpriseWeChat alert notifications
+
+     Related parameter configuration can refer to the EnterpriseWeChat robot document.
+
+* Script
+
+     We have implemented a shell script for alerting. We will pass the relevant alert parameters to the script and you can implement your alert logic in the shell. This is a good way to interface with internal alerting applications.
+
+* SMS
+
+     SMS alerts
+* FeiShu
+
+  FeiShu alert notification
+* Slack
+
+  Slack alert notification
+* PagerDuty
+
+  PagerDuty alert notification
+* WebexTeams
+
+  WebexTeams alert notification
+
+  Related parameter configuration can refer to the WebexTeams document.
+
+* Telegram
+
+  Telegram alert notification
+  
+  Related parameter configuration can refer to the Telegram document.
+
+* Http
+
+  We have implemented an HTTP alert plug-in. Since calls to most alerting services end up as HTTP requests, if we do not support your alerting tool yet, you can use HTTP to implement your alert logic. You are also welcome to contribute your general-purpose plug-ins to the community :)
diff --git a/docs/2.0.9/docs/en/contribute/backend/spi/datasource.md b/docs/2.0.9/docs/en/contribute/backend/spi/datasource.md
new file mode 100644
index 0000000..5772b43
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/backend/spi/datasource.md
@@ -0,0 +1,23 @@
+## DolphinScheduler Datasource SPI main design
+
+#### How do I use data sources?
+
+The data source center supports POSTGRESQL, HIVE/IMPALA, SPARK, CLICKHOUSE, SQLSERVER data sources by default.
+
+If you are using a MySQL or Oracle data source, you need to place the corresponding driver package in the lib directory.
+
+#### How to do Datasource plugin development?
+
+org.apache.dolphinscheduler.spi.datasource.DataSourceChannel
+org.apache.dolphinscheduler.spi.datasource.DataSourceChannelFactory
+org.apache.dolphinscheduler.plugin.datasource.api.client.CommonDataSourceClient
+
+1. First, the data source plug-in implements the above interfaces and inherits the general client. For details, refer to the implementation of data source plug-ins such as sqlserver and mysql; all RDBMS plug-ins are added in the same way.
+
+2. Add the driver configuration in the data source plug-in's pom.xml.
+
+We provide APIs for external access to all data sources in the DolphinScheduler datasource API module.
+
+#### **Future plan**
+
+Support data sources such as kafka, http, files, sparkSQL, FlinkSQL, etc.
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/contribute/backend/spi/registry.md b/docs/2.0.9/docs/en/contribute/backend/spi/registry.md
new file mode 100644
index 0000000..0957ff3
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/backend/spi/registry.md
@@ -0,0 +1,27 @@
+### DolphinScheduler Registry SPI Extension
+
+#### how to use?
+
+Make the following configuration (take zookeeper as an example)
+
+* Registry plug-in configuration, take Zookeeper as an example (registry.properties)
+  dolphinscheduler-service/src/main/resources/registry.properties
+  ```registry.properties
+   registry.plugin.name=zookeeper
+   registry.servers=127.0.0.1:2181
+  ```
+
+For specific configuration information, please refer to the parameter information provided by the specific plug-in, for example zk: `org/apache/dolphinscheduler/plugin/registry/zookeeper/ZookeeperConfiguration.java`
+All configuration keys need to be prefixed with `registry.`; for example, base.sleep.time.ms should be configured as registry.base.sleep.time.ms=100
+
+#### How to expand
+
+`dolphinscheduler-registry-api` defines the standard for implementing plugins. When you need to extend plugins, you only need to implement `org.apache.dolphinscheduler.registry.api.RegistryFactory`.
+
+Under the `dolphinscheduler-registry-plugin` module is the registry plugin we currently provide.
+
+#### FAQ
+
+1: registry connect timeout
+
+You can increase the relevant timeout parameters.
diff --git a/docs/2.0.9/docs/en/contribute/backend/spi/task.md b/docs/2.0.9/docs/en/contribute/backend/spi/task.md
new file mode 100644
index 0000000..70b01d4
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/backend/spi/task.md
@@ -0,0 +1,15 @@
+## DolphinScheduler Task SPI extension
+
+#### How to develop task plugins?
+
+org.apache.dolphinscheduler.spi.task.TaskChannel
+
+The plug-in implements the above interface. It mainly covers creating tasks (task initialization, task running, etc.) and cancelling tasks. If it is a YARN task, you need to implement org.apache.dolphinscheduler.plugin.task.api.AbstractYarnTask.
+
+We provide APIs for external access to all tasks in the dolphinscheduler-task-api module, while the dolphinscheduler-spi module is the general SPI code library, which defines all the plug-in modules, such as the alert module, the registry module, etc.; you can read it for more detail.
+
+*NOTICE*
+
+Since the task plug-in involves the front-end page, the front-end SPI has not yet been implemented, so you need to implement the front-end page corresponding to the plug-in separately.
+
+If there is a class conflict in the task plugin, you can use [Shade-Relocating Classes](https://maven.apache.org/plugins/maven-shade-plugin/) to solve this problem.
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/contribute/development-environment-setup.md b/docs/2.0.9/docs/en/contribute/development-environment-setup.md
new file mode 100644
index 0000000..0940682
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/development-environment-setup.md
@@ -0,0 +1,209 @@
+# DolphinScheduler development
+
+## Software Requirements
+Before setting up the DolphinScheduler development environment, please make sure you have installed the software as below:
+
+* [Git](https://git-scm.com/downloads)
+* [JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html): v1.8.x (Currently does not support jdk 11)
+* [Maven](http://maven.apache.org/download.cgi): v3.5+
+* [Node](https://nodejs.org/en/download): v16.13+ (dolphinScheduler version is lower than 3.0, please install node v12.20+)
+* [Pnpm](https://pnpm.io/installation): v6.x
+
+### Clone Git Repository
+
+Download the git repository through your git management tool, here we use git-core as an example
+
+```shell
+mkdir dolphinscheduler
+cd dolphinscheduler
+git clone git@github.com:apache/dolphinscheduler.git
+```
+
+### compile source code
+
+Supporting system:
+* MacOS
+* Linux
+
+Run `mvn clean install -Prelease -Dmaven.test.skip=true`
+
+## Docker image build
+
+DolphinScheduler publishes new Docker images with each release; you can find them on [Docker Hub](https://hub.docker.com/search?q=DolphinScheduler).
+
+* If you want to modify the DolphinScheduler source code and build Docker images locally, run the following after finishing your modification:
+```shell
+cd dolphinscheduler
+./mvnw -B clean package \
+       -Dmaven.test.skip \
+       -Dmaven.javadoc.skip \
+       -Dmaven.checkstyle.skip \
+       -Ddocker.tag=<TAG> \
+       -Pdocker,release              
+```
+
+When the command finishes you can find the images with `docker images`.
+
+* If you want to modify the DolphinScheduler source code and build and push Docker images to your registry <HUB_URL>, run the following after finishing your modification:
+```shell
+cd dolphinscheduler
+./mvnw -B clean deploy \
+       -Dmaven.test.skip \
+       -Dmaven.javadoc.skip \
+       -Dmaven.checkstyle.skip \
+       -Dmaven.deploy.skip \
+       -Ddocker.tag=<TAG> \
+       -Ddocker.hub=<HUB_URL> \
+       -Pdocker,release           
+```
+
+* If you want to modify the DolphinScheduler source code and also add custom dependencies to the Docker image, you can modify the Dockerfile definition after modifying the source code. Run the following command to find all Dockerfile files:
+
+```shell
+cd dolphinscheduler
+find . -iname 'Dockerfile'
+```
+
+Then run the Docker build command above
+
+* You can also create custom Docker images based on those images if you want to change them, for example to add dependencies or upgrade packages.
+
+```Dockerfile
+FROM dolphinscheduler-standalone-server
+RUN apt update && \
+    apt install -y <YOUR-CUSTOM-DEPENDENCE>
+```
+
+> **_Note:_** Docker will build and push linux/amd64,linux/arm64 multi-architecture images by default
+>
+> You have to use Docker 19.03 or later, because buildx is included since 19.03
+
+
+## Notice
+
+There are two ways to configure the DolphinScheduler development environment: standalone mode and normal mode.
+
+* [Standalone mode](#dolphinscheduler-standalone-quick-start): **Recommended**, the more convenient way to build a development environment; it covers most scenarios.
+* [Normal mode](#dolphinscheduler-normal-mode): Runs master, worker and api as separate servers, which covers more test scenarios than standalone and is closer to a real production environment.
+
+## DolphinScheduler Standalone Quick Start
+
+> **_Note:_** Use standalone server only for development and debugging, because it uses H2 Database as default database and Zookeeper Testing Server which may not be stable in production.
+> 
+> Standalone is only supported in DolphinScheduler 1.3.9 and later versions.
+> 
+> Standalone server is able to connect to external databases like mysql and postgresql, see [Standalone Deployment](https://dolphinscheduler.apache.org/en-us/docs/dev/user_doc/guide/installation/standalone.html) for instructions.
+
+### Git Branch Choose
+
+Use different Git branch to develop different codes
+
+* If you want to develop based on a binary package, switch to the corresponding release branch; for example, if you want to develop based on 1.3.9, choose branch `1.3.9-release`.
+* If you want to develop against the latest code, choose branch `dev`.
+
+### Start backend server
+
+Find the class `org.apache.dolphinscheduler.StandaloneServer` in IntelliJ IDEA and run its `main` function to start the server.
+
+### Start frontend server
+
+Install frontend dependencies and run it.
+> Note: You can see more detail about the frontend setting in [frontend development](./frontend-development.md).
+
+```shell
+cd dolphinscheduler-ui
+pnpm install
+pnpm run dev
+```
+
+Open [http://localhost:3000](http://localhost:3000) in your browser to log in to the DolphinScheduler UI. The default username and password are **admin/dolphinscheduler123**.
+
+## DolphinScheduler Normal Mode
+
+### Prepare
+
+#### ZooKeeper
+
+Download [ZooKeeper](https://www.apache.org/dyn/closer.lua/zookeeper/zookeeper-3.6.3), and extract it.
+
+* Create the directories `zkData` and `zkLog` for ZooKeeper data and logs.
+* Go to the ZooKeeper installation directory, copy the configuration file `conf/zoo_sample.cfg` to `conf/zoo.cfg`, and set `dataDir` (and optionally `dataLogDir`) in `conf/zoo.cfg` to the directories you created, for example:
+
+    ```shell
+    # We use path /data/zookeeper/data and /data/zookeeper/datalog here as example
+    dataDir=/data/zookeeper/data
+    dataLogDir=/data/zookeeper/datalog
+    ```
+
+* Start ZooKeeper from a terminal with `./bin/zkServer.sh start` (the whole preparation is condensed into the sketch below).
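+
+A minimal shell sketch of the ZooKeeper preparation above (the installation path `/opt/zookeeper` and the data directories are example assumptions):
+
+```shell
+cd /opt/zookeeper                        # your ZooKeeper installation directory (example path)
+mkdir -p /data/zookeeper/data /data/zookeeper/datalog
+cp conf/zoo_sample.cfg conf/zoo.cfg
+# edit conf/zoo.cfg and set dataDir / dataLogDir to the directories created above
+./bin/zkServer.sh start
+./bin/zkServer.sh status                 # verify ZooKeeper is running
+```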
+
+#### Database
+
+DolphinScheduler's metadata is stored in a relational database; MySQL and PostgreSQL are currently supported. We use MySQL as an example. Start the database and create a new database named `dolphinscheduler` as the DolphinScheduler metadata database.
+
+After creating the new database, run the SQL file `dolphinscheduler/dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_mysql.sql` directly in MySQL to complete the database initialization.
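+
+A minimal sketch of the database preparation above (the root login, the user, the password, and the character set are example assumptions; adjust them to your environment):
+
+```shell
+# create the metadata database and a dedicated user
+mysql -uroot -p -e "CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;"
+mysql -uroot -p -e "CREATE USER 'dolphinscheduler'@'%' IDENTIFIED BY 'dolphinscheduler';"
+mysql -uroot -p -e "GRANT ALL PRIVILEGES ON dolphinscheduler.* TO 'dolphinscheduler'@'%'; FLUSH PRIVILEGES;"
+# initialize the schema (run from the repository root)
+mysql -uroot -p dolphinscheduler < dolphinscheduler-dao/src/main/resources/sql/dolphinscheduler_mysql.sql
+```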
+
+#### Start Backend Server
+
+The following steps will guide you through starting the DolphinScheduler backend services.
+
+##### Backend Start Prepare
+
+* Open the project: use an IDE to open the project; here we use IntelliJ IDEA as an example. After opening, it will take a while for IntelliJ IDEA to download the dependencies.
+
+* File changes
+  * If you use MySQL as your metadata database, you need to modify `dolphinscheduler/pom.xml` and change the `scope` of the `mysql-connector-java` dependency to `compile`. This step is not necessary if you use PostgreSQL.
+  * Modify the database configuration in `dolphinscheduler-master/src/main/resources/application.yaml`.
+  * Modify the database configuration in `dolphinscheduler-worker/src/main/resources/application.yaml`.
+  * Modify the database configuration in `dolphinscheduler-api/src/main/resources/application.yaml`.
+
+
+Here we use MySQL with the database name, username, and password all set to `dolphinscheduler` as an example:
+  ```yaml
+   spring:
+     datasource:
+       driver-class-name: com.mysql.cj.jdbc.Driver
+       url: jdbc:mysql://127.0.0.1:3306/dolphinscheduler?useUnicode=true&characterEncoding=UTF-8
+       username: dolphinscheduler
+       password: dolphinscheduler
+  ```
+
+* Log level: add a line `<appender-ref ref="STDOUT"/>` to the following configuration files so that logs are displayed on the command line:
+
+  `dolphinscheduler-master/src/main/resources/logback-spring.xml`
+  `dolphinscheduler-worker/src/main/resources/logback-spring.xml`
+  `dolphinscheduler-api/src/main/resources/logback-spring.xml`
+
+  The result after the modification is shown below:
+
+  ```diff
+  <root level="INFO">
+  +  <appender-ref ref="STDOUT"/>
+    <appender-ref ref="APILOGFILE"/>
+    <appender-ref ref="SKYWALKING-LOG"/>
+  </root>
+  ```
+
+> **_Note:_** Only DolphinScheduler 2.0 and later versions need to install the plugin before starting the server; it is not needed before version 2.0.
+
+##### Server start
+
+There are three services that need to be started: MasterServer, WorkerServer, and ApiApplicationServer.
+
+* MasterServer: execute the `main` function of the class `org.apache.dolphinscheduler.server.master.MasterServer` in IntelliJ IDEA, with the *VM Options* `-Dlogging.config=classpath:logback-spring.xml -Ddruid.mysql.usePingMethod=false -Dspring.profiles.active=mysql`
+* WorkerServer: execute the `main` function of the class `org.apache.dolphinscheduler.server.worker.WorkerServer` in IntelliJ IDEA, with the *VM Options* `-Dlogging.config=classpath:logback-spring.xml -Ddruid.mysql.usePingMethod=false -Dspring.profiles.active=mysql`
+* ApiApplicationServer: execute the `main` function of the class `org.apache.dolphinscheduler.api.ApiApplicationServer` in IntelliJ IDEA, with the *VM Options* `-Dlogging.config=classpath:logback-spring.xml -Dspring.profiles.active=api,mysql`. After it has started, you can find the Open API documentation at http://localhost:12345/dolphinscheduler/doc.html
+
+> The `mysql` in the VM option `-Dspring.profiles.active=mysql` specifies which configuration profile to use.
+
+### Start Frontend Server
+
+Install frontend dependencies and run it
+
+```shell
+cd dolphinscheduler-ui
+pnpm install
+pnpm run dev
+```
+
+Open [http://localhost:3000](http://localhost:3000) in your browser to log in to the DolphinScheduler UI. The default username and password are **admin/dolphinscheduler123**.
diff --git a/docs/2.0.9/docs/en/contribute/frontend-development.md b/docs/2.0.9/docs/en/contribute/frontend-development.md
new file mode 100644
index 0000000..801610c
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/frontend-development.md
@@ -0,0 +1,639 @@
+# Front-end development documentation
+
+### Technical selection
+```
+Vue mvvm framework
+
+Es6 ECMAScript 6.0
+
+Ans-ui Analysys-ui
+
+D3  Visual Library Chart Library
+
+Jsplumb connection plugin library
+
+Lodash high performance JavaScript utility library
+```
+
+### Development environment
+
+- #### Node installation
+Download the Node package (note version v12.20.2): `https://nodejs.org/download/release/v12.20.2/`
+
+- #### Front-end project construction
+From the command line, `cd` into the `dolphinscheduler-ui` project directory and execute `npm install` to pull the project dependency packages.
+
+> If `npm install` is very slow, you can set the taobao mirror
+
+```
+npm config set registry http://registry.npm.taobao.org/
+```
+
+- Modify `API_BASE` in the file `dolphinscheduler-ui/.env` to interact with the backend:
+
+```
+# back end interface address
+API_BASE = http://127.0.0.1:12345
+```
+
+> ##### Special attention: if the project reports a "node-sass" error while pulling the dependency packages, run the following command and then run `npm install` again.
+
+```bash
+npm install node-sass --unsafe-perm #Install node-sass dependency separately
+```
+
+- #### Development environment operation
+- `npm start` starts the project development environment (after startup, visit http://localhost:8888)
+
+#### Front-end project release
+
+- `npm run build` packages the project (after packaging, a folder named `dist` is created in the root directory for publishing to Nginx)
+
+Run the `npm run build` command to generate the packaged `dist` directory.
+
+Copy it to the corresponding directory on the server (the directory where the front-end service stores static pages).
+
+Visit the address `http://localhost:8888`.
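+
+A minimal deployment sketch (the server address and the Nginx directory `/usr/share/nginx/html` are example assumptions):
+
+```shell
+npm run build
+# copy the packaged files to the static directory served by Nginx
+scp -r dist/* user@your-server:/usr/share/nginx/html/
+```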
+
+#### Start with node and daemon under Linux
+
+Install pm2 `npm install -g pm2`
+
+Execute `pm2 start npm -- run dev` in the `dolphinscheduler-ui` root directory to start the project.
+
+#### Commands
+
+- Start `pm2 start npm -- run dev`
+
+- Stop `pm2 stop npm`
+
+- Delete `pm2 delete npm`
+
+- Status  `pm2 list`
+
+```
+
+[root@localhost dolphinscheduler-ui]# pm2 start npm -- run dev
+[PM2] Applying action restartProcessId on app [npm](ids: 0)
+[PM2] [npm](0) ✓
+[PM2] Process successfully started
+┌──────────┬────┬─────────┬──────┬──────┬────────┬─────────┬────────┬─────┬──────────┬──────┬──────────┐
+│ App name │ id │ version │ mode │ pid  │ status │ restart │ uptime │ cpu │ mem      │ user │ watching │
+├──────────┼────┼─────────┼──────┼──────┼────────┼─────────┼────────┼─────┼──────────┼──────┼──────────┤
+│ npm      │ 0  │ N/A     │ fork │ 6168 │ online │ 31      │ 0s     │ 0%  │ 5.6 MB   │ root │ disabled │
+└──────────┴────┴─────────┴──────┴──────┴────────┴─────────┴────────┴─────┴──────────┴──────┴──────────┘
+ Use `pm2 show <id|name>` to get more details about an app
+
+```
+
+### Project directory structure
+
+`build` some webpack configurations for packaging and development environment projects
+
+`node_modules` development environment node dependency package
+
+`src` files required by the project
+
+`src => combo` localized third-party resources for the project; run `npm run combo`, see `build/combo.js` for details
+
+`src => font` font icon library; icons can be added from https://www.iconfont.cn. Note: the font library is a customized build, re-imported through `src/sass/common/_font.scss`
+
+`src => images` public image storage
+
+`src => js` js/vue
+
+`src => lib` internal component library of the company (can be deleted after open source)
+
+`src => sass` sass files; one page corresponds to one sass file
+
+`src => view` page files; one page corresponds to one html file
+
+```
+> Projects are developed using vue single page application (SPA)
+- All page entry files are in the `src/js/conf/${ corresponding page filename => home} index.js` entry file
+- The corresponding sass file is in `src/sass/conf/${corresponding page filename => home}/index.scss`
+- The corresponding html file is in `src/view/${corresponding page filename => home}/index.html`
+```
+
+Public modules and utilities: `src/js/module`
+
+`components` => internal project common components
+
+`download` => download component
+
+`echarts` => chart component
+
+`filter` => filter and vue pipeline
+
+`i18n` => internationalization
+
+`io` => io request encapsulation based on axios
+
+`mixin` => vue mixin public part for disabled operation
+
+`permissions` => permission operation
+
+`util` => tool
+
+### System function module
+
+Home  => `http://localhost:8888/#/home`
+
+Project Management => `http://localhost:8888/#/projects/list`
+```
+| Project Home
+| Workflow
+  - Workflow definition
+  - Workflow instance
+  - Task instance
+```
+
+Resource Management => `http://localhost:8888/#/resource/file`
+```
+| File Management
+| udf Management
+  - Resource Management
+  - Function management
+```
+
+Data Source Management => `http://localhost:8888/#/datasource/list`
+
+Security Center => `http://localhost:8888/#/security/tenant`
+```
+| Tenant Management
+| User Management
+| Alarm Group Management
+  - master
+  - worker
+```
+
+User Center => `http://localhost:8888/#/user/account`
+
+## Routing and state management
+
+The project `src/js/conf/home` is divided into
+
+`pages` => route to page directory
+```
+ The page file corresponding to the routing address
+```
+
+`router` => route management
+```
+vue router, the entry file index.js in each page will be registered. Specific operations: https://router.vuejs.org/zh/
+```
+
+`store` => status management
+```
+The page corresponding to each route has a state management file divided into:
+
+actions => mapActions => Details:https://vuex.vuejs.org/zh/guide/actions.html
+
+getters => mapGetters => Details:https://vuex.vuejs.org/zh/guide/getters.html
+
+index => entrance
+
+mutations => mapMutations => Details:https://vuex.vuejs.org/zh/guide/mutations.html
+
+state => mapState => Details:https://vuex.vuejs.org/zh/guide/state.html
+
+Specific action:https://vuex.vuejs.org/zh/
+```
+
+## specification
+## Vue specification
+##### 1.Component name
+Component names consist of multiple words connected with a hyphen (-), which avoids conflicts with HTML tags and gives a clearer structure.
+```
+// positive example
+export default {
+    name: 'page-article-item'
+}
+```
+
+##### 2.Component files
+For internal common components under `src/js/module/components`, the folder name is the same as the file name. Subcomponents and util tools split out of a common component are placed in the component's internal `_source` folder.
+```
+└── components
+    ├── header
+        ├── header.vue
+        └── _source
+            └── nav.vue
+            └── util.js
+    ├── conditions
+        ├── conditions.vue
+        └── _source
+            └── search.vue
+            └── util.js
+```
+
+##### 3.Prop
+When you define a prop, always name it in camelCase, and use kebab-case (-) when binding the value in the parent component.
+This follows the conventions of each language: HTML attributes are case-insensitive, so hyphenated names are friendlier there, while camelCase is more natural in JavaScript.
+
+```
+// Vue
+props: {
+    articleStatus: Boolean
+}
+// HTML
+<article-item :article-status="true"></article-item>
+```
+
+The definition of Prop should specify its type, defaults, and validation as much as possible.
+
+Example:
+
+```
+props: {
+    attrM: Number,
+    attrA: {
+        type: String,
+        required: true
+    },
+    attrZ: {
+        type: Object,
+        //  The default value of the array/object should be returned by a factory function
+        default: function () {
+            return {
+                msg: 'achieve you and me'
+            }
+        }
+    },
+    attrE: {
+        type: String,
+        validator: function (v) {
+            return !(['success', 'fail'].indexOf(v) === -1) 
+        }
+    }
+}
+```
+
+##### 4.v-for
+When using v-for, always provide a key so that rendering is more efficient when the DOM updates.
+```
+<ul>
+    <li v-for="item in list" :key="item.id">
+        {{ item.title }}
+    </li>
+</ul>
+```
+
+Avoid using v-for on the same element as v-if (for example a `<li>`), because v-for has a higher priority than v-if. To avoid unnecessary computation and rendering, move the v-if up to the container's parent element.
+```
+<ul v-if="showList">
+    <li v-for="item in list" :key="item.id">
+        {{ item.title }}
+    </li>
+</ul>
+```
+
+##### 5.v-if / v-else-if / v-else
+If the elements controlled by the same v-if / v-else logic are structurally identical, Vue reuses the shared parts for more efficient switching, which can carry over state such as an input's value. To avoid unintended reuse, add a distinct key to each element for identification.
+```
+<div v-if="hasData" key="mazey-data">
+    <span>{{ mazeyData }}</span>
+</div>
+<div v-else key="mazey-none">
+    <span>no data</span>
+</div>
+```
+
+##### 6.Instruction abbreviation
+To keep the codebase consistent, always use the directive shorthands. There is nothing wrong with `v-bind` and `v-on`; this is only a unified convention.
+```
+<input :value="mazeyUser" @click="verifyUser">
+```
+
+##### 7.Top-level element order of single file components
+Styles are bundled into a single file, so a style defined in one .vue file will also take effect on elements with the same class name in other files; therefore every component should have a unique top-level class name before it is created.
+Note: the sass plugin has been added to the project, so scss syntax can be written directly in a single .vue file.
+For uniformity and ease of reading, the top-level elements should be placed in the order `<template>`, `<script>`, `<style>`.
+
+```
+<template>
+  <div class="test-model">
+    test
+  </div>
+</template>
+<script>
+  export default {
+    name: "test",
+    data() {
+      return {}
+    },
+    props: {},
+    methods: {},
+    watch: {},
+    beforeCreate() {
+    },
+    created() {
+    },
+    beforeMount() {
+    },
+    mounted() {
+    },
+    beforeUpdate() {
+    },
+    updated() {
+    },
+    beforeDestroy() {
+    },
+    destroyed() {
+    },
+    computed: {},
+    components: {},
+  }
+</script>
+
+<style lang="scss" rel="stylesheet/scss">
+  .test-model {
+
+  }
+</style>
+
+```
+
+## JavaScript specification
+
+##### 1.var / let / const
+It is recommended to stop using var and use let / const instead, preferring const. Every variable must be declared before it is used, except functions declared with `function`, which are hoisted and can be placed anywhere.
+
+##### 2.quotes
+```
+const foo = 'after division'
+const bar = `${foo}, front-end engineer`
+```
+
+##### 3.function
+Anonymous functions should uniformly use arrow functions. When there are multiple parameters or return values, prefer object destructuring.
+```
+function getPersonInfo ({name, sex}) {
+    // ...
+    return {name, sex}
+}
+```
+Function names use camelCase. Names starting with a capital letter are constructors; names starting with a lowercase letter are ordinary functions, and the `new` operator should not be used to call ordinary functions.
+
+##### 4.object
+```
+const foo = {a: 0, b: 1}
+const bar = JSON.parse(JSON.stringify(foo))
+
+const foo = {a: 0, b: 1}
+const bar = {...foo, c: 2}
+
+const foo = {a: 3}
+Object.assign(foo, {b: 4})
+
+const myMap = new Map([])
+for (let [key, value] of myMap.entries()) {
+    // ...
+}
+```
+
+##### 5.module
+Unified management of project modules using import / export.
+```
+// lib.js
+export default {}
+
+// app.js
+import app from './lib'
+```
+
+Import is placed at the top of the file.
+
+If the module has only one export, use `export default`; otherwise do not.
+
+## HTML / CSS
+
+##### 1.Label
+
+Do not write the type attribute when referencing external CSS or JavaScript. HTML5 defaults to text/css and text/javascript, so there is no need to specify them.
+```
+<link rel="stylesheet" href="//www.test.com/css/test.css">
+<script src="//www.test.com/js/test.js"></script>
+```
+
+##### 2.Naming
+Class and ID names should be semantic so that their purpose is clear from the name alone; multiple words are connected with a hyphen.
+```
+// positive example
+.test-header{
+    font-size: 20px;
+}
+```
+
+##### 3.Attribute abbreviation
+Use CSS shorthand properties as much as possible to make the code more efficient and easier to understand.
+
+```
+// counter example
+border-width: 1px;
+border-style: solid;
+border-color: #ccc;
+
+// positive example
+border: 1px solid #ccc;
+```
+
+##### 4.Document type
+The HTML5 standard should always be used.
+
+```
+<!DOCTYPE html>
+```
+
+##### 5.Notes
+Each module file should have a block comment at the top.
+```
+/**
+* @module mazey/api
+* @author Mazey <mazey@mazey.net>
+* @description test.
+* */
+```
+
+## interface
+
+##### All interfaces return a Promise
+Note that any non-zero `code` is treated as an error and should be handled in `catch`.
+
+```
+const test = () => {
+  return new Promise((resolve, reject) => {
+    resolve({
+      a:1
+    })
+  })
+}
+
+// transfer
+test().then(res => {
+  console.log(res)
+  // {a:1}
+})
+```
+
+Normal return
+```
+{
+  code:0,
+  data:{},
+  msg:'success'
+}
+```
+
+Error return
+```
+{
+  code:10000, 
+  data:{},
+  msg:'failed'
+}
+```
+If the interface is a POST request, the Content-Type defaults to application/x-www-form-urlencoded; if the Content-Type is changed to application/json,
+the interface parameters need to be passed in the following way:
+```
+io.post('url', payload, null, null, { emulateJSON: false }).then(res => {
+  resolve(res)
+}).catch(e => {
+  reject(e)
+})
+```
+
+##### Related interface path
+
+dag related interface `src/js/conf/home/store/dag/actions.js`
+
+Data Source Center Related Interfaces  `src/js/conf/home/store/datasource/actions.js`
+
+Project Management Related Interfaces `src/js/conf/home/store/projects/actions.js`
+
+Resource Center Related Interfaces `src/js/conf/home/store/resource/actions.js`
+
+Security Center Related Interfaces `src/js/conf/home/store/security/actions.js`
+
+User Center Related Interfaces `src/js/conf/home/store/user/actions.js`
+
+## Extended development
+
+##### 1.Add node
+
+(1) First place the node's icon in the `src/js/conf/home/pages/dag` folder. The icon file is named `toolbar_${the English name of the node type defined in the backend, for example: SHELL}.png`.
+
+(2) Find the `tasksType` object in `src/js/conf/home/pages/dag/_source/config.js` and add the new node type to it.
+```
+'DEPENDENT': {  // the node type English name defined in the backend is used as the key
+  desc: 'DEPENDENT',  // tooltip desc
+  color: '#2FBFD8'  // The color represented is mainly used for tree and gantt
+}
+```
+
+(3) Add a `${node type (lowercase)}.vue` file in `src/js/conf/home/pages/dag/_source/formModel/tasks`. The component contents related to the current node are written here. Every node component must have a `_verification ()` function; after verification succeeds, the relevant data of the current component is emitted to the parent component.
+```
+/**
+ * Verification
+*/
+  _verification () {
+    // datasource subcomponent verification
+    if (!this.$refs.refDs._verifDatasource()) {
+      return false
+    }
+
+    // verification function
+    if (!this.method) {
+      this.$message.warning(`${i18n.$t('Please enter method')}`)
+      return false
+    }
+
+    // localParams subcomponent validation
+    if (!this.$refs.refLocalParams._verifProp()) {
+      return false
+    }
+    // store
+    this.$emit('on-params', {
+      type: this.type,
+      datasource: this.datasource,
+      method: this.method,
+      localParams: this.localParams
+    })
+    return true
+  }
+```
+
+(4) Common components used inside a node component live under `_source`, and `commcon.js` is used to configure shared data.
+
+##### 2.Add a status type
+(1) Find the `tasksState` object in `src/js/conf/home/pages/dag/_source/config.js` and add the new status type to it.
+
+```
+ 'WAITTING_DEPEND': {  // the state type defined in the backend is used as the key on the frontend
+  id: 11,  // front-end definition id is used as a sort
+  desc: `${i18n.$t('waiting for dependency')}`,  // tooltip desc
+  color: '#5101be',  // The color represented is mainly used for tree and gantt
+  icoUnicode: '&#xe68c;',  // font icon
+  isSpin: false  // whether to rotate (requires code judgment)
+}
+```
+
+##### 3.Add the action bar tool
+(1) Find the `toolOper` object in `src/js/conf/home/pages/dag/_source/config.js` and add the new tool to it.
+```
+{
+  code: 'pointer',  // tool identifier
+  icon: '&#xe781;',  // tool icon
+  disable: disable,  // disable
+  desc: `${i18n.$t('Drag node and selected item')}`  // tooltip desc
+}
+```
+
+(2) Tool classes are exposed as constructors under `src/js/conf/home/pages/dag/_source/plugIn`:
+
+`downChart.js`  =>  dag image download processing
+
+`dragZoom.js`  =>  mouse zoom effect processing
+
+`jsPlumbHandle.js`  =>  drag and drop line processing
+
+`util.js`  =>   belongs to the `plugIn` tool class
+
+
+The operation is handled in the `src/js/conf/home/pages/dag/_source/dag.js` => `toolbarEvent` event.
+
+
+##### 4.Add a routing page
+
+(1) First add a routing entry in the route management file `src/js/conf/home/router/index.js`:
+```
+{
+  path: '/test',  // routing address
+  name: 'test',  // alias
+  component: resolve => require(['../pages/test/index'], resolve),  // route corresponding component entry file
+  meta: {
+    title: `${i18n.$t('test')} - EasyScheduler`  // title display
+  }
+},
+```
+
+(2) Create a `test` folder in `src/js/conf/home/pages` and create an `index.vue` entry file in the folder.
+
+    This will give you direct access to `http://localhost:8888/#/test`.
+
+
+##### 5.Add preset email addresses
+
+Find `src/lib/localData/email.js`, which provides the preset email addresses used by the startup and scheduled email inputs for auto-complete matching.
+```
+export default ["test@analysys.com.cn","test1@analysys.com.cn","test3@analysys.com.cn"]
+```
+
+##### 6.Authority management and disabled state processing
+
+Permissions are based on the `userType` field (`ADMIN_USER` / `GENERAL_USER`) returned by the backend `getUserInfo` interface, which controls whether the page operation buttons are `disabled`.
+
+Specific operation: `src/js/module/permissions/index.js`
+
+Disabled-state processing: `src/js/module/mixin/disabledState.js`
+
diff --git a/docs/2.0.9/docs/en/contribute/have-questions.md b/docs/2.0.9/docs/en/contribute/have-questions.md
new file mode 100644
index 0000000..2d84759
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/have-questions.md
@@ -0,0 +1,65 @@
+# Have Questions?
+
+## StackOverflow
+
+For usage questions, it is recommended you use the StackOverflow tag [apache-dolphinscheduler](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) as it is an active forum for DolphinScheduler users’ questions and answers.
+
+Some quick tips when using StackOverflow:
+
+- Prior to submitting questions, please:
+  - Search StackOverflow’s [apache-dolphinscheduler](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) tag to see if your question has already been answered
+- Please follow the StackOverflow [code of conduct](https://stackoverflow.com/help/how-to-ask)
+- Always use the apache-dolphinscheduler tag when asking questions
+- Please do not cross-post between [StackOverflow](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) and [GitHub issues](https://github.com/apache/dolphinscheduler/issues/new/choose)
+
+Question template:
+
+> **Describe the question**
+>
+> A clear and concise description of what the question is.
+>
+> **Which version of DolphinScheduler:**
+>
+>  -[1.3.0-preview]
+>
+> **Additional context**
+>
+> Add any other context about the problem here.
+>
+> **Requirement or improvement**
+>
+> \- Please describe about your requirements or improvement suggestions.
+
+For broad or opinion-based questions, requests for external resources, debugging issues, bugs, contributing to the project, and usage scenarios, it is recommended you use [GitHub issues](https://github.com/apache/dolphinscheduler/issues/new/choose) or the dev@dolphinscheduler.apache.org mailing list.
+
+## Mailing Lists
+
+- [dev@dolphinscheduler.apache.org](https://lists.apache.org/list.html?dev@dolphinscheduler.apache.org) is for people who want to contribute code to DolphinScheduler. [(subscribe)](mailto:dev-subscribe@dolphinscheduler.apache.org?subject=(send%20this%20email%20to%20subscribe)) [(unsubscribe)](mailto:dev-unsubscribe@dolphinscheduler.apache.org?subject=(send%20this%20email%20to%20unsubscribe)) [(archives)](http://lists.apache.org/list.html?dev@dolphinscheduler.apache.org)
+
+Some quick tips when using email:
+
+- Prior to submitting questions, please:
+  - Search StackOverflow at [apache-dolphinscheduler](https://stackoverflow.com/questions/tagged/apache-dolphinscheduler) to see if your question has already been answered
+
+- Tagging the subject line of your email will help you get a faster response, e.g. [api-server]: How to get open api interface?
+
+- Tags may help identify a topic by:
+  - Component: MasterServer, ApiServer, WorkerServer, AlertServer, etc.
+  - Level: Beginner, Intermediate, Advanced
+  - Scenario: Debug, How-to
+
+- For error logs or long code examples, please use [GitHub gist](https://gist.github.com/) and include only a few lines of the pertinent code / log within the email.
+
+## Chat Rooms
+
+Chat rooms are great for quick questions or discussions on specialized topics. 
+
+The following chat rooms are officially part of Apache DolphinScheduler:
+
+The Slack workspace URL: http://asf-dolphinscheduler.slack.com/.
+
+You can join through the invitation URL: https://s.apache.org/dolphinscheduler-slack.
+
+This chat room is used for questions and discussions related to using DolphinScheduler.
+
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/contribute/join/DS-License.md b/docs/2.0.9/docs/en/contribute/join/DS-License.md
new file mode 100644
index 0000000..c3f13d7
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/DS-License.md
@@ -0,0 +1,42 @@
+# License Notice
+
+As we know, DolphinScheduler is an open-source project under The Apache Software Foundation (ASF), which means you have to follow the Apache way to become a DolphinScheduler contributor. Furthermore, Apache has extremely strict rules regarding licenses. This document explains the ASF license policy and how to avoid license risks at an early stage when you participate in DolphinScheduler.
+
+Note: This article only applies to the Apache projects.
+
+### Licenses Could be Accepted to the Apache Project
+
+When you intend to add a new feature to DolphinScheduler (or another Apache project) that references other open-source software, you have to pay attention to the open-source licenses that Apache projects accept, listed below.
+
+[ASF 3RD PARTY LICENSE POLICY](https://apache.org/legal/resolved.html)
+
+If the 3rd party software is not covered by the above policy, your code unfortunately cannot pass the audit, and we suggest looking for a substitute.
+
+In addition, when you need a new dependency in the project, please email dev@dolphinscheduler.apache.org describing the reason and the impact so it can be discussed. You also need at least 3 positive votes from the PPMC to finish the whole process.
+
+### How to Legally Use 3rd Party Open-source Software in the DolphinScheduler
+
+Moreover, when we intend to bring new software into our project (not limited to 3rd party jars, text, CSS, JS, pictures, icons, audio files, and modifications based on 3rd party files), we need to use it legally in addition to complying with ASF policy. Refer to the following article:
+
+* [COMMUNITY-LED DEVELOPMENT "THE APACHE WAY"](https://apache.org/dev/licensing-howto.html)
+
+
+For example, when we use ZooKeeper, we should include ZooKeeper's NOTICE file (every open-source project has a NOTICE file, generally under the root directory) in our project. As the Apache license explains, "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work.
+
+We are not going to dive into every 3rd party open-source license policy; you may look them up if interested.
+
+### DolphinScheduler-License Check Rules
+
+In general, we run license-check scripts in our project. The DolphinScheduler license check, provided by [kezhenxu94](https://github.com/kezhenxu94), differs a bit from other open-source projects. All in all, we are trying to catch license issues as early as possible.
+
+We need to follow these steps when adding new jars or external resources (see the sketch after this list):
+
+* Add the name and the version of the jar file to known-dependencies.txt
+* Add the relevant maven repository address to the 'dolphinscheduler-dist/release-docs/LICENSE' file
+* Append the relevant NOTICE files to the 'dolphinscheduler-dist/release-docs/NOTICE' file and make sure they are identical to those in the original repository
+* Add the relevant source code licenses under the 'dolphinscheduler-dist/release-docs/license/' directory, with the file named license-<filename>.txt, e.g. license-zk.txt
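+
+A hedged sketch of these steps for a hypothetical dependency `foo-bar-1.2.3.jar` (the exact location of `known-dependencies.txt` may differ in your checkout):
+
+```shell
+# 1. register the jar name and version (file location is an assumption)
+echo "foo-bar-1.2.3.jar" >> known-dependencies.txt
+# 2. append the project and its repository address to the LICENSE file
+vim dolphinscheduler-dist/release-docs/LICENSE
+# 3. append the dependency's NOTICE (if any), unchanged from the original repository
+vim dolphinscheduler-dist/release-docs/NOTICE
+# 4. add the dependency's license text, named license-<name>.txt
+cp FOO-BAR-LICENSE.txt dolphinscheduler-dist/release-docs/license/license-foo-bar.txt
+```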
+
+### References
+
+* [COMMUNITY-LED DEVELOPMENT "THE APACHE WAY"](https://apache.org/dev/licensing-howto.html)
+* [ASF 3RD PARTY LICENSE POLICY](https://apache.org/legal/resolved.html)
diff --git a/docs/2.0.9/docs/en/contribute/join/become-a-committer.md b/docs/2.0.9/docs/en/contribute/join/become-a-committer.md
new file mode 100644
index 0000000..deac7d8
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/become-a-committer.md
@@ -0,0 +1,11 @@
+# How to Become DolphinScheduler Committer
+
+Anyone can be a contributor to an Apache project. Being a contributor simply means that you take an interest in the project and contribute in some way, ranging from asking sensible questions (which documents the project and provides feedback to developers) through to providing new features as patches.
+
+If you become a valuable contributor to the project you may well be invited to become a committer. Committer is a term used at the ASF to signify someone who is committed to a particular project. It brings with it the privilege of write access to the project repository and resources.
+
+In the DolphinScheduler community, a committer who has earned even more merit can be invited to become part of the Project Management Committee (PMC).
+
+One thing that is sometimes hard to understand when you are new to the open development process used at the ASF, is that we value the community more than the code. A strong and healthy community will be respectful and be a fun and rewarding place. More importantly, a diverse and healthy community can continue to support the code over the longer term, even as individual companies come and go from the field.
+
+More details could be found [here](https://community.apache.org/contributors/).
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/contribute/join/code-conduct.md b/docs/2.0.9/docs/en/contribute/join/code-conduct.md
new file mode 100644
index 0000000..5505e95
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/code-conduct.md
@@ -0,0 +1,68 @@
+# Code of Conduct
+
+The following Code of Conduct is based on full compliance with the [Apache Software Foundation Code of Conduct](https://www.apache.org/foundation/policies/conduct.html).
+
+## Development philosophy
+ - **Consistent** code style, naming, and usage are consistent.  
+ - **Easy to read** code is obvious, easy to read and understand, when debugging one knows the intent of the code.
+ - **Neat** agree with the concepts of *Refactoring* and *Clean Code* and pursue clean and elegant code.
+ - **Abstract** hierarchy is clear and the concepts are refined and reasonable. Keep methods, classes, packages, and modules at the same level of abstraction.
+ - **Heart** maintain a sense of responsibility and keep refining the code with craftsmanship.
+
+## Development specifications
+
+ - Executing `mvn -U clean package -Prelease` should compile successfully and pass all test cases.
+ - The test coverage, as reported by the coverage tool, must be no less than that of the dev branch.
+ - In the root directory, use Checkstyle to check your code; violating the validation rules requires a special reason. The template is located at ds_check_style.xml.
+ - Follow the coding specifications.
+
+## Coding specifications
+
+ - Use linux line breaks.
+ - Indentation (including empty lines) is consistent with the last line.
+ - An empty line is required between the class declaration and the following variable or method.
+ - There should be no meaningless empty lines.
+ - Classes, methods, and variables should be named as the name implies and abbreviations should be avoided.
+ - Return value variables are named `result`; loop variables are named `each`; and `entry` is used instead of `each` when iterating over a map.
+ - A caught exception is named `e`; if the exception is caught and deliberately ignored, it is named `ignored`.
+ - Configuration files are named in camelCase, starting with a lowercase letter.
+ - Code that requires comment interpretation should be as small as possible and explained by the method name instead.
+ - For `equals` and `==` in a conditional expression, put the constant on the left and the variable on the right; for greater-than / less-than comparisons, put the variable on the left and the constant on the right.
+ - In addition to the abstract classes used for inheritance, try to design the class as `final`.
+ - Extract nested loops into separate methods as much as possible.
+ - The order in which member variables are defined and the order in which parameters are passed is consistent across classes and methods.
+ - Priority is given to the use of guard statements.
+ - Classes and methods have minimal access control.
+ - A private method should be placed directly after the method that uses it; if there are multiple private methods, they should appear in the same order as they are called in the original method.
+ - Method parameters and return values are not allowed to be `null`.
+ - Prefer the ternary operator over if-else for return and assignment statements.
+ - Priority is given to `LinkedList` and only use `ArrayList` if you need to get element values in the collection through the index.
+ - Collection types that may expand, such as `ArrayList` and `HashMap`, must specify an initial size to avoid resizing.
+ - Logs and notes are always in English.
+ - Comments can only contain `javadoc`, `todo` and `fixme`.
+ - Exposed classes and methods must have javadoc; other classes and methods, as well as methods that override a parent class, do not require javadoc.
+
+## Unit test specifications
+
+ - Test code and production code are subject to the same code specifications.
+ - Unit tests are subject to AIR (Automatic, Independent, Repeatable) Design concept.
+   - Automatic: Unit tests should be fully automated, not interactive. Manual checking of output results is prohibited, `System.out`, `log`, etc. are not allowed, and must be verified with assertions. 
+   - Independent: It is prohibited to call each other between unit test cases and to rely on the order of execution. Each unit test can be run independently.
+   - Repeatable: Unit tests cannot be affected by the external environment and can be repeated. 
+ - Unit tests are subject to BCDE(Border, Correct, Design, Error) Design principles.
+   - Border (Boundary value test): The expected results are obtained by entering the boundaries of loop boundaries, special values, data order, etc.
+   - Correct (Correctness test): The expected results are obtained with the correct input.
+   - Design (Rationality Design): Design high-quality unit tests in combination with production code design.
+   - Error (Fault tolerance test): The expected results are obtained through incorrect input such as illegal data, abnormal flow, etc.
+ - If there is no special reason, the test needs to be fully covered.
+ - Each test case needs to be accurately asserted.
+ - Prepare the environment for code separation from the test code.
+ - Only JUnit `Assert`, Hamcrest `CoreMatchers`, and Mockito-related classes may be statically imported.
+ - Single-data assertions should use `assertTrue`, `assertFalse`, `assertNull` and `assertNotNull`.
+ - Multi-data assertions should use `assertThat`.
+ - Assert precisely; try not to use the `not` and `containsString` assertions.
+ - The actual value in a test case should be named `actualXXX`, and the expected value should be named `expectedXXX`.
+ - Classes and Methods with `@Test` labels do not require javadoc.
+
+ - Public specifications.
+   - Each line is no longer than `200` characters, ensuring that each line is semantically complete for easy understanding.
diff --git a/docs/2.0.9/docs/en/contribute/join/commit-message.md b/docs/2.0.9/docs/en/contribute/join/commit-message.md
new file mode 100644
index 0000000..92269a7
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/commit-message.md
@@ -0,0 +1,94 @@
+# Commit Message Notice
+
+### Preface
+
+A good commit message can help other developers (or future developers) quickly understand the context of related changes, and can also help project managers determine whether the commit is suitable for inclusion in the release. But when we checked the commit logs of many open source projects, we found an interesting problem: some developers have very good code quality, but the commit message record is rather confusing, so when other contributors or learners view the code, they cannot intuitively understand the purpose of the changes through the commit log.
+As Peter Hutterer said: Re-establishing the context of a piece of code is wasteful. We can't avoid it completely, so our efforts should go to reducing it as much as possible. Commit messages can do exactly that, and as a result, a commit message shows whether a developer is a good collaborator. Therefore, DolphinScheduler developed this convention in line with other communities and official Apache documents.
+
+### Commit Message RIP
+
+#### 1:Clearly modify the content
+
+A commit message should clearly state what issues (bug fixes, function enhancements, etc.) the submission solves, so that other developers can better track the issues and clarify the optimization during the version iteration process.
+
+#### 2:Associate the corresponding Pull Request or Issue
+
+When our changes are large, the commit message should best be associated with the relevant Issue or Pull Request on GitHub, so that our developers can quickly understand the context of the code submission through the associated information when reviewing the code. If the current commit is for an issue, then the issue can be closed in the Footer section.
+
+#### 3:Unified format
+
+The formatted CommitMessage can help provide more historical information for quick browsing, and it can also generate a Change Log directly from commit.
+
+Commit message should include three parts: Header, Body and Footer. Among them, Header is required, Body and Footer can be omitted.
+
+##### Header
+
+The header part has only one line, including three fields: type (required), scope (optional), and subject (required).
+
+[DS-ISSUE number][type] subject
+
+(1) Type is used to indicate the category of commit, and only the following 7 types are allowed.
+
+- feat: New features
+- fix: Bug fixes
+- docs: Documentation
+- style: Format (does not affect changes in code operation)
+- refactor: Refactoring (it is not a new feature or a code change to fix a bug)
+- test: Add test
+- chore: Changes in the build process or auxiliary tools
+
+If the type is feat and fix, the commit will definitely appear in the change log. Other types (docs, chore, style, refactor, test) are not recommended.
+
+(2) Scope
+
+Scope is used to indicate the scope of commit impact, such as server, remote, etc. If there is no suitable scope, you can use \*.
+
+(3) subject
+
+Subject is a short description of the purpose of the commit, no more than 50 characters.
+
+##### Body
+
+The body part is a detailed description of this commit. It can span multiple lines, and lines should wrap at 72 characters so that automatic wrapping does not affect the appearance.
+
+Note the following points in the Body section:
+
+- Use the verb-object structure, note the use of present tense. For example, use change instead of changed or changes
+
+- Don't capitalize the first letter
+
+- The end of the sentence does not need a ‘.’ (period)
+
+##### Footer
+
+Footer only works in two situations
+
+(1) Incompatible changes
+
+If the current code is not compatible with the previous version, the Footer part starts with BREAKING CHANGE, followed by a description of the change, the reason for the change, and the migration method.
+
+(2) Close Issue
+
+If the current commit is for a certain issue, you can close the issue in the Footer section, or close multiple issues at once.
+
+##### For Example
+
+```
+[DS-001][docs-en] add commit message
+
+- commit message RIP
+- build some conventions
+- help the commit messages become clean and tidy
+- help developers and release managers better track issues
+  and clarify the optimization in the version iteration
+
+This closes #001
+```
+
+### Reference documents
+
+[Commit message format](https://cwiki.apache.org/confluence/display/GEODE/Commit+Message+Format)
+
+[On commit messages-Peter Hutterer](http://who-t.blogspot.com/2009/12/on-commit-messages.html)
+
+[RocketMQ Community Operation Conventions](https://mp.weixin.qq.com/s/LKM4IXAY-7dKhTzGu5-oug)
diff --git a/docs/2.0.9/docs/en/contribute/join/contribute.md b/docs/2.0.9/docs/en/contribute/join/contribute.md
new file mode 100644
index 0000000..ea89596
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/contribute.md
@@ -0,0 +1,40 @@
+# Participate in Contributing
+
+First of all, thank you very much for choosing and using DolphinScheduler, and welcome to join the DolphinScheduler family!
+
+We encourage any form of participation in the community, which may eventually lead to becoming a Committer or PPMC member, such as:
+* Report problems you encounter via GitHub [issues](https://github.com/apache/dolphinscheduler/issues).
+* Answer the issue questions that others are asking.
+* Help improve the documentation.
+* Help the project add test cases.
+* Add comments to the code.
+* Submit a PR that fixes the bug or Feature.
+* Publish application case practice, scheduling process analysis, or technical articles related to scheduling.
+* Help promote DolphinScheduler, participate in technical conferences or meetup, sharing and more.
+
+Welcome to the contributing team and join open source starting with submitting your first PR.
+ - For example, add code comments, or pick up issues tagged "easy to fix" or other very simple issues (misspellings, etc.), and familiarize yourself with the submission process through a first simple PR.
+
+Note: Contributions are not limited to PRs; anything that contributes to the development of the project counts.
+
+I'm sure you'll benefit from open source by participating in DolphinScheduler!
+
+### 1. Participate in documentation contributions.
+
+Refer to the [Submit Guide-Document Notice](./document.md)
+
+### 2. Participate in code contributions.
+
+Refer to the [Submit Guide-Issue Notice](./issue.md), [Submit Guide-Pull Request Notice](./pull-request.md), [Submit Guide-Commit Message Notice](./commit-message.md)
+
+### 3. How to pick up an Issue and submit a Pull Request.
+
+If you want to implement a Feature or fix a Bug, please refer to the following:
+
+* All Bugs and the new Features are recommended and managed using the Issues Page.
+* If you want to develop a Feature, first reply to the Issue associated with that feature, indicating that you are currently working on it, and set yourself a "deadline" for submitting it in the reply comment.
+* It's a good idea to find a mentor (or an instructor) in the core contributors who gives immediate feedback on design and functional implementation.
+* You should create a new branch for your work; for the branch naming convention, refer to the [Submit Guide-Pull Request Notice](./pull-request.md). For example, if you want to complete the feature for Issue 111, your branch name should be feature-111 (see the sketch after this list). The feature name can be determined after discussion with the mentor.
+* When you're done, send a Pull Request to dolphinscheduler; please refer to the [Submit Guide-Submit Pull Request Process](./submit-code.md).
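+
+A minimal git sketch of the branch workflow above (the remote names `upstream`/`origin` and the issue number 111 follow the example in the text and your own fork setup):
+
+```shell
+git checkout dev
+git pull upstream dev               # sync the latest dev branch
+git checkout -b feature-111         # branch named after the issue you picked up
+# ... develop and commit your changes ...
+git push origin feature-111         # then open a Pull Request against apache/dolphinscheduler
+```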
+
+If you want to submit a Pull Request to complete a Feature or fix a Bug, it is recommended that you start with `good first issue` or `easy-to-fix` issues and submit a small piece of functionality. Do not change too many files at a time, since that puts a lot of pressure on reviewers; it is better to split the work into multiple Pull Requests rather than submitting everything at once.
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/contribute/join/document.md b/docs/2.0.9/docs/en/contribute/join/document.md
new file mode 100644
index 0000000..f2fd831
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/document.md
@@ -0,0 +1,62 @@
+# Documentation Notice
+
+Good documentation is critical for any type of software. Any contribution that can improve the DolphinScheduler documentation is welcome.
+
+###  Get the document project
+
+Documentation for the DolphinScheduler project is maintained in a separate [git repository](https://github.com/apache/dolphinscheduler-website).
+
+First you need to fork the document project into your own github repository, and then clone the document to your local computer.
+
+```
+git clone https://github.com/<your-github-user-name>/dolphinscheduler-website
+```
+
+### The document environment
+
+The DolphinScheduler website is supported by [docsite](https://github.com/chengshiwen/docsite-ext)
+
+Make sure that your node version is 10.x; docsite does not yet support versions higher than 10.x.
+
+### Document build guide
+
+1. Run `npm install` in the root directory to install the dependencies.
+
+2. Run commands to collect resources:
+   1. Run `export PROTOCOL_MODE=ssh` to tell Git to clone resources via the SSH protocol instead of HTTPS.
+   2. Run `./scripts/prepare_docs.sh` to prepare all related resources; for more information see [how prepare script work](https://github.com/apache/dolphinscheduler-website/blob/master/HOW_PREPARE_WOKR.md).
+
+3. Run `npm run start` in the root directory to start a local server, you will see the website in 'http://localhost:8080'.
+
+4. Run `npm run build` to build source code into dist directory.
+
+5. Verify your change locally: `python -m SimpleHTTPServer 8000`; if your Python version is 3, use `python3 -m http.server 8000` instead.
+
+If the latest version of node is installed locally, consider using `nvm` to allow different versions of `node` to run on your computer.
+
+1. Refer to the [Instructions](http://nvm.sh) to install nvm.
+
+2. Run `nvm install v10.23.1` to install node v10.
+
+3. Run `nvm use v10.23.1` to switch the current working environment to node v10.
+
+Now you can run and build the website in your local environment.
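+
+For convenience, the setup above can be run roughly as follows (a minimal sketch; the node version and ports follow the steps above):
+
+```shell
+nvm install v10.23.1 && nvm use v10.23.1   # switch to node 10 if needed
+npm install                                # install dependencies
+export PROTOCOL_MODE=ssh                   # clone related resources over SSH
+./scripts/prepare_docs.sh                  # prepare docs resources
+npm run start                              # local preview at http://localhost:8080
+```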
+
+### The document specification
+
+1. **Spaces are required** between Chinese characters and English words or numbers, and **spaces are not required** between Chinese punctuation marks and English words or numbers, to enhance the aesthetics and readability of mixed Chinese-English text.
+
+2. It is recommended that you use "you" in general; of course, the formal form can be used when necessary, such as in a warning prompt.
+
+### How to submit a document Pull Request
+
+1. Do not use `git add .` to add all changes.
+
+2. Simply push the changed files, for example:
+
+ * `*.md`
+ * `blog.js or docs.js or site.js`
+
+3. Submit the Pull Request to the **master** branch.
+
+### Reference to the documentation
+
+[Apache Flink Translation Specifications](https://cwiki.apache.org/confluence/display/FLINK/Flink+Translation+Specifications)
diff --git a/docs/2.0.9/docs/en/contribute/join/issue.md b/docs/2.0.9/docs/en/contribute/join/issue.md
new file mode 100644
index 0000000..376b065
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/issue.md
@@ -0,0 +1,136 @@
+# Issue Notice
+
+## Preface
+Issues function is used to track various Features, Bugs, Functions, etc. The project maintainer can organize the tasks to be completed through issues.
+
+An issue is an important step in fleshing out a feature or bug;
+the contents that can be discussed in an issue include, but are not limited to, the feature itself, the causes of existing bugs, research on a preliminary scheme, and the corresponding implementation and code design.
+
+Only after the Issue is approved should the corresponding Pull Request be implemented.
+
+If an issue corresponds to a large feature, it is recommended to divide it into multiple small issues according to the functional modules and other dimensions.
+
+## Specification
+
+### Issue title
+
+Title Format: [`Issue Type`][`Module Name`] `Issue Description`
+
+The `Issue Type` is as follows:
+
+<table>
+    <thead>
+        <tr>
+            <th style="width: 10%; text-align: center;">Issue Type</th>
+            <th style="width: 20%; text-align: center;">Description</th>
+            <th style="width: 20%; text-align: center;">Example</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td style="text-align: center;">Feature</td>
+            <td style="text-align: center;">Include expected new features and functions</td>
+            <td style="text-align: center;">[Feature][api] Add xxx api in xxx controller</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Bug</td>
+            <td style="text-align: center;">Bugs in the program</td>
+            <td style="text-align: center;">[Bug][api] Throw exception when xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Improvement</td>
+            <td style="text-align: center;">Some improvements of the current program, not limited to code format, program performance, etc</td>
+            <td style="text-align: center;">[Improvement][server] Improve xxx between Master and Worker</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Test</td>
+            <td style="text-align: center;">Specifically for the test case</td>
+            <td style="text-align: center;">[Test][server] Add xxx e2e test</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Sub-Task</td>
+            <td style="text-align: center;">Those generally are subtasks of feature class. For large features, they can be divided into many small subtasks to complete one by one</td>
+            <td style="text-align: center;">[Sub-Task][server] Implement xxx in xxx</td>
+        </tr>
+    </tbody>
+</table>
+
+The `Module Name` is as follows:
+
+<table>
+    <thead>
+        <tr>
+            <th style="width: 10%; text-align: center;">Module Name</th>
+            <th style="width: 20%; text-align: center;">Description</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td style="text-align: center;">alert</td>
+            <td style="text-align: center;">Alert module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">api</td>
+            <td style="text-align: center;">Application program interface layer module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">service</td>
+            <td style="text-align: center;">Application service layer module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">dao</td>
+            <td style="text-align: center;">Application data access layer module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">plugin</td>
+            <td style="text-align: center;">Plugin module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">remote</td>
+            <td style="text-align: center;">Communication module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">server</td>
+            <td style="text-align: center;">Server module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">ui</td>
+            <td style="text-align: center;">Front end module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">docs-zh</td>
+            <td style="text-align: center;">Chinese document module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">docs</td>
+            <td style="text-align: center;">English document module</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">...</td>
+            <td style="text-align: center;">-</td>
+        </tr>
+    </tbody>
+</table>
+
+### Issue content template
+
+https://github.com/apache/dolphinscheduler/tree/dev/.github/ISSUE_TEMPLATE
+
+### Contributor
+
+Except for some special cases, it is recommended to discuss under issue or mailing list to determine the design scheme or provide the design scheme,
+as well as the code implementation design before completing the issue.
+
+If there are many different solutions, it is suggested to make a decision through mailing list or voting under issue.
+The issue can be implemented after final scheme and code implementation design being approved.
+The main purpose of this is to avoid wasting time caused by different opinions on implementation design or reconstruction in the pull request review stage.
+
+### Question
+
+- How to deal with a user who raises an issue but does not know which module it corresponds to?
+
+    It is true that most users do not know which module an issue belongs to when they raise it.
+    In fact, this is very common in many open source communities. In such cases, the committers / contributors usually do know which module is affected by the issue.
+    If the issue is really valuable after being reviewed by committers and contributors, a committer can change the issue title to reference the specific module involved,
+    or leave a message asking the user who raised the issue to change it to the corresponding title.
+
diff --git a/docs/2.0.9/docs/en/contribute/join/microbench.md b/docs/2.0.9/docs/en/contribute/join/microbench.md
new file mode 100644
index 0000000..5cf148c
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/microbench.md
@@ -0,0 +1,100 @@
+# Micro BenchMark Notice
+
+All optimization must be based on data verification, and blind optimization is rejected. Based on this, we provide the MicroBench module.
+
+The MicroBench module is based on the OpenJDK JMH component (HotSpot's recommended benchmark test program). When you start benchmarking, you don't need additional dependencies.
+
+JMH, the Java Microbenchmark Harness, is a tool suite dedicated to code microbenchmarking. What is a microbenchmark? Simply put, it is a benchmark at the method level, with an accuracy down to microseconds. When you have located a hot method and want to further optimize its performance, you can use JMH to quantitatively analyze the results of the optimization.
+
+### Several points to note in Java benchmark testing
+
+- Preventing useless (dead) code from entering the test method and distorting the result.
+
+- Supporting concurrent testing.
+
+- Presenting the test results clearly.
+
+### Typical application scenarios of JMH
+
+1. Quantitatively analyzing the optimization effect of a hotspot function.
+
+2. Determining how long a function takes to execute, and how the execution time correlates with the input variables.
+
+3. Comparing multiple implementations of a function.
+
+DolphinScheduler-MicroBench provides `AbstractBaseBenchmark`: you can inherit from it and write your benchmark code, and `AbstractMicroBenchmark` guarantees that it runs in JUnit mode.
+
+### Customized operating parameters
+
+The default AbstractMicrobenchmark configuration is:
+
+- Warmup iterations: 10 (`warmupIterations`)
+
+- Measurement iterations: 10 (`measureIterations`)
+
+- Fork count: 2 (`forkCount`)
+
+You can override these parameters at startup with the system properties `-DwarmupIterations`, `-DmeasureIterations`, `-DforkCount`, and `-DperfReportDir` (the output directory for benchmark result files), as sketched below.
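+
+Below is a minimal sketch (not part of the DolphinScheduler codebase) of how such `-D` overrides can be mapped onto the standard JMH runner options; the `BenchmarkLauncher` class name and the way the properties are read are assumptions for illustration only.
+
+```java
+// Hypothetical launcher: reads the -D system properties listed above
+// (falling back to the documented defaults) and feeds them into the JMH Runner API.
+import org.openjdk.jmh.runner.Runner;
+import org.openjdk.jmh.runner.RunnerException;
+import org.openjdk.jmh.runner.options.Options;
+import org.openjdk.jmh.runner.options.OptionsBuilder;
+
+public class BenchmarkLauncher {
+
+    public static void main(String[] args) throws RunnerException {
+        Options options = new OptionsBuilder()
+                .include("EnumBenchMark")                                            // benchmarks to run
+                .warmupIterations(Integer.getInteger("warmupIterations", 10))        // -DwarmupIterations
+                .measurementIterations(Integer.getInteger("measureIterations", 10))  // -DmeasureIterations
+                .forks(Integer.getInteger("forkCount", 2))                           // -DforkCount
+                .build();
+        new Runner(options).run();
+    }
+}
+```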
+
+### DolphinScheduler-MicroBench Introduction
+
+A small number of iterations is generally not recommended for real measurements. However, fewer iterations help to verify that the benchmark itself works; once it is verified, run the benchmark with a large number of iterations.
+
+```java
+@Warmup(iterations = 2, time = 1)
+@Measurement(iterations = 4, time = 1)
+@State(Scope.Benchmark)
+public class EnumBenchMark extends AbstractBaseBenchmark {
+
+}
+```
+
+Benchmarks can be run at the method level or at the class level, and command-line parameters override the parameters set on the annotations.
+
+```java
+@Benchmark // Method-level annotation marking the method to be benchmarked
+@BenchmarkMode(Mode.AverageTime) // Benchmark mode, chosen from the Mode enumeration
+@OutputTimeUnit(TimeUnit.MICROSECONDS) // Time unit of the output
+public void enumStaticMapTest() {
+    TestTypeEnum.newGetNameByType(testNum);
+}
+```
+
+When your benchmark is written, you can run it to see the results (the actual numbers depend on your system configuration).
+
+First, JMH warms up the code:
+
+```text
+# Warmup Iteration   1: 0.007 us/op
+# Warmup Iteration   2: 0.008 us/op
+Iteration   1: 0.004 us/op
+Iteration   2: 0.004 us/op
+Iteration   3: 0.004 us/op
+Iteration   4: 0.004 us/op
+```
+
+After warming up, we usually get results like the following:
+
+```text
+Benchmark                        (testNum)   Mode  Cnt          Score           Error  Units
+EnumBenchMark.simpleTest               101  thrpt    8  428750972.826 ±  66511362.350  ops/s
+EnumBenchMark.simpleTest               108  thrpt    8  299615240.337 ± 290089561.671  ops/s
+EnumBenchMark.simpleTest               103  thrpt    8  288423221.721 ± 130542990.747  ops/s
+EnumBenchMark.simpleTest               104  thrpt    8  236811792.152 ± 155355935.479  ops/s
+EnumBenchMark.simpleTest               105  thrpt    8  472247775.246 ±  45769877.951  ops/s
+EnumBenchMark.simpleTest               103  thrpt    8  455473025.252 ±  61212956.944  ops/s
+EnumBenchMark.enumStaticMapTest        101   avgt    8          0.006 ±         0.003  us/op
+EnumBenchMark.enumStaticMapTest        108   avgt    8          0.005 ±         0.002  us/op
+EnumBenchMark.enumStaticMapTest        103   avgt    8          0.006 ±         0.005  us/op
+EnumBenchMark.enumStaticMapTest        104   avgt    8          0.006 ±         0.004  us/op
+EnumBenchMark.enumStaticMapTest        105   avgt    8          0.004 ±         0.001  us/op
+EnumBenchMark.enumStaticMapTest        103   avgt    8          0.004 ±         0.001  us/op
+EnumBenchMark.enumValuesTest           101   avgt    8          0.011 ±         0.004  us/op
+EnumBenchMark.enumValuesTest           108   avgt    8          0.025 ±         0.016  us/op
+EnumBenchMark.enumValuesTest           103   avgt    8          0.019 ±         0.010  us/op
+EnumBenchMark.enumValuesTest           104   avgt    8          0.018 ±         0.018  us/op
+EnumBenchMark.enumValuesTest           105   avgt    8          0.014 ±         0.012  us/op
+EnumBenchMark.enumValuesTest           103   avgt    8          0.012 ±         0.009  us/op
+```
+
+OpenJDK officially provides many sample codes; if you are interested, you can study JMH further with the [OpenJDK-JMH-Example](http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/).
diff --git a/docs/2.0.9/docs/en/contribute/join/pull-request.md b/docs/2.0.9/docs/en/contribute/join/pull-request.md
new file mode 100644
index 0000000..fece5d7
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/pull-request.md
@@ -0,0 +1,94 @@
+# Pull Request Notice
+
+## Preface
+A Pull Request is a form of software collaboration: it is the process of bringing code for a given piece of functionality into the trunk. During this process, the code can be discussed, reviewed, and modified.
+
+In a Pull Request, we try not to discuss the implementation of the code: the general implementation and its logic should already have been determined in the Issue. In the Pull Request, we focus only on code format and coding conventions, so as to avoid wasting time on differing opinions about the implementation.
+
+## Specification
+
+### Pull Request Title
+
+Title Format: [`Pull Request Type`-`Issue No`][`Module Name`] `Pull Request Description`
+
+The corresponding relationship between `Pull Request Type` and `Issue Type` is as follows:
+
+<table>
+    <thead>
+        <tr>
+            <th style="width: 10%; text-align: center;">Issue Type</th>
+            <th style="width: 20%; text-align: center;">Pull Request Type</th>
+            <th style="width: 20%; text-align: center;">Example(Suppose Issue No is 3333)</th>
+        </tr>
+    </thead>
+    <tbody>
+        <tr>
+            <td style="text-align: center;">Feature</td>
+            <td style="text-align: center;">Feature</td>
+            <td style="text-align: center;">[Feature-3333][server] Implement xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Bug</td>
+            <td style="text-align: center;">Fix</td>
+            <td style="text-align: center;">[Fix-3333][server] Fix xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Improvement</td>
+            <td style="text-align: center;">Improvement</td>
+            <td style="text-align: center;">[Improvement-3333][alert] Improve the performance of xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Test</td>
+            <td style="text-align: center;">Test</td>
+            <td style="text-align: center;">[Test-3333][api] Add the e2e test of xxx</td>
+        </tr>
+        <tr>
+            <td style="text-align: center;">Sub-Task</td>
+            <td style="text-align: center;">(Parent type corresponding to Sub-Task)</td>
+            <td style="text-align: center;">[Feature-3333][server] Implement xxx</td>
+        </tr>
+    </tbody>
+</table>
+
+`Issue No` refers to the number of the Issue that the current Pull Request resolves; `Module Name` is the same as the `Module Name` of the Issue.
+
+### Pull Request Branch
+
+Branch name format: `Pull Request type`-`Issue number`, e.g. `Feature-3333`.
+
+### Pull Request Content
+
+Please refer to the commit message section.
+
+### Pull Request Code Style
+
+Code style is something you have to consider when you submit a pull request to DolphinScheduler. In CI we use [Checkstyle](https://checkstyle.sourceforge.io), a development tool that helps programmers write Java code adhering to a coding standard, to keep the DolphinScheduler codebase in a consistent style. Your pull request cannot be merged if the code style check fails. You can run *Checkstyle* in your local environment before you submit your pull request. The setup steps are as follows:
+
+1. Prepare the Checkstyle configuration file: you can download it manually by [clicking here](https://github.com/apache/dolphinscheduler/blob/3.0.0/style/checkstyle.xml), but taking it from the DolphinScheduler repository is a better way. You can find the configuration file at the path `style/checkstyle.xml` after you clone the repository from GitHub.
+
+2. Install the Checkstyle plugin in IntelliJ IDEA: search for the plugin with the keyword **CheckStyle-IDEA** or install it from [this page](https://plugins.jetbrains.com/plugin/1065-checkstyle-idea). See [install plugin](https://www.jetbrains.com/help/idea/managing-plugins.html#install_plugin_from_repo) if you do not know how to install a plugin in IntelliJ IDEA.
+
+3. Configure and activate Checkstyle and the IntelliJ IDEA code style: after completing the above steps, you can configure and activate Checkstyle in your environment. You can find the Checkstyle settings under `Preferences -> Tools -> Checkstyle`, and then activate it as the screenshot shows.
+
+<p align="center">
+    <img src="../../../../img/contribute/join/pull-request/checkstyle-idea.png" alt="checkstyle idea configuration" />
+</p>
+
+Now that the Checkstyle plugin is set up, it will show the code and files that are out of style. We highly recommend you also configure the IntelliJ IDEA code style for auto-formatting your code in IntelliJ IDEA; you can find this setting under `Preferences -> Editor -> Code Style -> Java` and then activate it as the screenshot shows.
+
+<p align="center">
+    <img src="../../../../img/contribute/join/pull-request/code-style-idea.png" alt="code style idea configuration" />
+</p>
+
+4. Format your code in IntelliJ IDEA before submitting your pull request: after you have done the above steps, you can use the IntelliJ IDEA shortcut `Cmd + Opt + L` (on macOS) or `Ctrl + Alt + L` (on Windows/Linux) to format your code. The best time to format your code is before you commit your change to your local git repository.
+
+### Question
+
+- How to deal with the scenario of one Pull Request for many Issues.
+
+  First of all, this scenario is rare, and its root cause is that multiple Issues require the same change.
+  There are usually two solutions. The first is to merge the multiple Issues into a single Issue and close the others.
+  The second applies when the Issues have subtle differences:
+  in that case the responsibilities of each Issue can be clearly divided, each Issue is marked as a Sub-Task, and these Sub-Task Issues are associated with one parent Issue.
+  Each Pull Request should then be associated with only one Sub-Task Issue.
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/contribute/join/review.md b/docs/2.0.9/docs/en/contribute/join/review.md
new file mode 100644
index 0000000..e4dc799
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/review.md
@@ -0,0 +1,153 @@
+# Community Review
+
+Besides submitting Issues and pull requests to the GitHub repository mentioned in [team](/en-us/community/community.html), another important way to
+contribute to DolphinScheduler is reviewing GitHub Issues or Pull Requests. By reviewing you not only learn about the latest news and
+direction of the community, but also come to understand good design in other people's work. At the same time, you can
+increase your exposure in the community and build up your reputation.
+
+Anyone is encouraged to review Issues and Pull Requests. We also started a Help Wanted email discussion to solicit contributors
+from the community to review them. You can see the details in the [mail][mail-review-wanted]; we put the results of the mail thread
+in [GitHub Discussion][discussion-result-review-wanted].
+
+> Note: It is not only the users mentioned in the [GitHub Discussion][discussion-result-review-wanted] who can review Issues or Pull
+> Requests; the community advocates that **anyone is encouraged to review Issues and Pull Requests**. The users in
+> [GitHub Discussion][discussion-result-review-wanted] expressed their willingness to review when we collected volunteers in the mail thread.
+> The advantage of this list is that when the community has a discussion, in addition to mentioning the members in [team](/en-us/community/community.html),
+> you can also find help from the people in [GitHub Discussion][discussion-result-review-wanted]. If you want to join the
+> [GitHub Discussion][discussion-result-review-wanted], please comment in that discussion and leave the module you are interested
+> in, and a maintainer will add you to the list.
+
+## How to Review
+
+DolphinScheduler receives community contributions through GitHub, and all its Issues and Pull Requests are hosted in GitHub.
+If you want to join the community by reviewing, please go to the section [review Issues](#issues); if you prefer Pull Requests, please
+go to the section [review Pull Requests](#pull-requests).
+
+### Issues
+
+Reviewing Issues means discussing [Issues][all-issues] in GitHub and giving suggestions on them. The situations include, but are not limited to, the following:
+
+| Situation | Reason | Label | Action |
+| ------ | ------ | ------ | ------ |
+| Won't fix | Has already been fixed in the dev branch | [wontfix][label-wontfix] | Close the Issue and inform the creator of the fixed version if it has already been released |
+| Duplicate issue | The same problem was reported before | [duplicate][label-duplicate] | Close the Issue and inform the creator of the link to the duplicate issue |
+| Description not clear | Detailed reproduction steps are missing | [need more information][label-need-more-information] | Ask the creator to add more description |
+
+In addition to giving suggestions, adding labels to Issues is also important during review. Labeled Issues can be retrieved
+more easily, which is convenient for further processing. An Issue can have more than one label. Common issue categories are:
+
+| Label | Meaning |
+| ------ | ------ |
+| [UI][label-UI] | UI and front-end related |
+| [security][label-security] | Security Issue |
+| [user experience][label-user-experience] | User experience Issue |
+| [development][label-development] | Development Issue |
+| [Python][label-Python] | Python Issue |
+| [plug-in][label-plug-in] | Plug-in Issue |
+| [document][label-document] | Document Issue |
+| [docker][label-docker] | Docker Issue |
+| [need verify][label-need-verify] | Need verify Issue |
+| [e2e][label-e2e] | E2E Issue |
+| [win-os][label-win-os] | Windows operating system Issue |
+| [suggestion][label-suggestion] | Give suggestion to us |
+ 
+Besides classification, labels can also set the priority of Issues. The higher the priority, the more attention it gets
+in the community and the easier it is to be fixed or implemented. The priority labels are as follows:
+
+| Label | priority |
+| ------ | ------ |
+| [priority:high][label-priority-high] | High priority |
+| [priority:middle][label-priority-middle] | Middle priority |
+| [priority:low][label-priority-low] | Low priority |
+
+All the labels above are common labels. You can see all labels of this project in the [full label list][label-all-list].
+
+Before reading the following content, please make sure you have labeled the Issue.
+  
+* Remove the label [Waiting for reply][label-waiting-for-reply] after replying: the label [Waiting for reply][label-waiting-for-reply]
+  is added when [creating an Issue][issue-choose]. It makes it easier to find Issues that have not been replied to yet, and you should remove
+  this label after you have reviewed the Issue. If you do not remove it, others will waste time looking at the same Issue.
+* Mark [Waiting for review][label-waiting-for-review] when you are not sure whether the Issue is resolved or not: there are two situations
+  when you review an Issue. One is that the problem has been located or resolved, in which case you may have to [create a PR](./submit-code.md)
+  when necessary. The other is that you are not sure about the Issue, in which case you can label it [Waiting for review][label-waiting-for-review]
+  and mention others to get a second confirmation.
+
+When an Issue needs a Pull Request, you can also label it with one of the labels below.
+
+| Label | Meaning |
+| ------ | ------ |
+| [Chore][label-Chore] | Chore for project |
+| [Good first issue][label-good-first-issue] | Good first issue for new contributor |
+| [easy to fix][label-easy-to-fix] | Easy to fix, harder than `Good first issue` |
+| [help wanted][label-help-wanted] | Help wanted |
+
+> Note: Only members have permission to add or delete labels. When you need to add or remove labels but are not a member,
+> you can `@` members to do it for you. But as long as you have a GitHub account, you can comment on Issues and give suggestions.
+> We encourage everyone in the community to comment on and answer Issues
+
+### Pull Requests
+
+<!-- markdown-link-check-disable -->
+Reviewing Pull Requests means discussing [Pull Requests][all-PRs] in GitHub and giving suggestions on them. Reviewing DolphinScheduler's
+Pull Requests is the same as [GitHub's reviewing changes in pull requests][gh-review-pr]. You can give your
+suggestions in Pull Requests:
+
+* When you think the Pull Request is OK to be merged, you can approve it according to the "Approve" process
+  in [GitHub's reviewing changes in pull requests][gh-review-pr].
+* When you think the Pull Request needs to be changed, you can comment on it according to the "Comment" process in
+  [GitHub's reviewing changes in pull requests][gh-review-pr]. And when you find issues that must be fixed before it can
+  be merged, please follow the "Request changes" process in [GitHub's reviewing changes in pull requests][gh-review-pr] to ask the contributor
+  to modify it.
+<!-- markdown-link-check-enable -->
+
+Labeling Pull Requests is an important part of reviewing. Reasonable classification can save a lot of time for reviewers. The good news
+is that the names and usage of Pull Request labels are the same as in [Issues](#issues), which reduces what you have to remember. For
+example, if a Pull Request is related to Docker and blocks deployment, we can label it with [docker][label-docker]
+and [priority:high][label-priority-high].
+
+Pull Requests also have some unique labels of their own:
+
+| Label | Meaning |
+| ------ | ------ |
+| [miss document][label-miss-document] | The Pull Request is missing documentation that should be added |
+| [first time contributor][label-first-time-contributor] | The Pull Request was submitted by a first-time contributor |
+| [don't merge][label-do-not-merge] | The Pull Request has some problem and should not be merged |
+
+> Note: Only members have permission to add or delete labels. When you need to add or remove labels but are not a member,
+> you can `@` members to do it for you. But as long as you have a GitHub account, you can comment on Pull Requests and give suggestions.
+> We encourage everyone in the community to review Pull Requests
+
+[mail-review-wanted]: https://lists.apache.org/thread/9flwlzrp69xjn6v8tdkbytq8glqp2k51
+[discussion-result-review-wanted]: https://github.com/apache/dolphinscheduler/discussions/7545
+[label-wontfix]: https://github.com/apache/dolphinscheduler/labels/wontfix
+[label-duplicate]: https://github.com/apache/dolphinscheduler/labels/duplicate
+[label-need-more-information]: https://github.com/apache/dolphinscheduler/labels/need%20more%20information
+[label-win-os]: https://github.com/apache/dolphinscheduler/labels/win-os
+[label-waiting-for-reply]: https://github.com/apache/dolphinscheduler/labels/Waiting%20for%20reply
+[label-waiting-for-review]: https://github.com/apache/dolphinscheduler/labels/Waiting%20for%20review
+[label-user-experience]: https://github.com/apache/dolphinscheduler/labels/user%20experience
+[label-development]: https://github.com/apache/dolphinscheduler/labels/development
+[label-UI]: https://github.com/apache/dolphinscheduler/labels/UI
+[label-suggestion]: https://github.com/apache/dolphinscheduler/labels/suggestion
+[label-security]: https://github.com/apache/dolphinscheduler/labels/security
+[label-Python]: https://github.com/apache/dolphinscheduler/labels/Python
+[label-plug-in]: https://github.com/apache/dolphinscheduler/labels/plug-in
+[label-document]: https://github.com/apache/dolphinscheduler/labels/document
+[label-docker]: https://github.com/apache/dolphinscheduler/labels/docker
+[label-all-list]: https://github.com/apache/dolphinscheduler/labels
+[label-Chore]: https://github.com/apache/dolphinscheduler/labels/Chore
+[label-good-first-issue]: https://github.com/apache/dolphinscheduler/labels/good%20first%20issue
+[label-help-wanted]: https://github.com/apache/dolphinscheduler/labels/help%20wanted
+[label-easy-to-fix]: https://github.com/apache/dolphinscheduler/labels/easy%20to%20fix
+[label-priority-high]: https://github.com/apache/dolphinscheduler/labels/priority%3Ahigh
+[label-priority-middle]: https://github.com/apache/dolphinscheduler/labels/priority%3Amiddle
+[label-priority-low]: https://github.com/apache/dolphinscheduler/labels/priority%3Alow
+[label-miss-document]: https://github.com/apache/dolphinscheduler/labels/miss%20document
+[label-first-time-contributor]: https://github.com/apache/dolphinscheduler/labels/first%20time%20contributor
+[label-do-not-merge]: https://github.com/apache/dolphinscheduler/labels/don%27t%20merge
+[label-e2e]: https://github.com/apache/dolphinscheduler/labels/e2e
+[label-need-verify]: https://github.com/apache/dolphinscheduler/labels/need%20to%20verify
+[issue-choose]: https://github.com/apache/dolphinscheduler/issues/new/choose
+[all-issues]: https://github.com/apache/dolphinscheduler/issues
+[all-PRs]: https://github.com/apache/dolphinscheduler/pulls
+[gh-review-pr]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/reviewing-changes-in-pull-requests/about-pull-request-reviews
diff --git a/docs/2.0.9/docs/en/contribute/join/security.md b/docs/2.0.9/docs/en/contribute/join/security.md
new file mode 100644
index 0000000..28bcda1
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/security.md
@@ -0,0 +1,8 @@
+# Security
+
+The Apache Software Foundation takes a rigorous stance on eliminating security issues in its software projects. Apache DolphinScheduler is also very concerned about security issues related to its features and functionality.
+
+If you have concerns regarding DolphinScheduler's security, or you discover a vulnerability or potential threat, don't hesitate to get in touch with the Apache Security Team by sending a mail to [security@apache.org](mailto:security@apache.org). Please specify the project name as DolphinScheduler in the email and provide a description of the relevant problem or potential threat. You are also urged to describe how to reproduce and replicate the issue. The Apache Security Team and the DolphinScheduler community will get back to you after assessing and analysing the findings.
+
+Please report the security issue to the security email address before disclosing it publicly.
+
diff --git a/docs/2.0.9/docs/en/contribute/join/submit-code.md b/docs/2.0.9/docs/en/contribute/join/submit-code.md
new file mode 100644
index 0000000..ac87950
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/submit-code.md
@@ -0,0 +1,63 @@
+# Submit Code
+
+* First, fork a copy of the code from the remote repository *https://github.com/apache/dolphinscheduler.git* into your own repository
+
+* The remote repository currently has the following branches:
+    * master           the normal delivery branch
+        After a stable release, the code from the stable branch is merged into master.
+    
+    * dev              the daily development branch
+        Newly submitted code should be pull-requested to this branch.
+
+
+* Clone your own repository to your local machine
+    `git clone https://github.com/<YOUR-GITHUB-USERNAME>/dolphinscheduler.git`
+
+* Add the remote repository address and name it upstream
+    `git remote add upstream https://github.com/apache/dolphinscheduler.git`
+
+* View the repositories
+    `git remote -v`
+
+> At this time, there will be two repositories: origin (your own repository) and upstream (the remote repository)
+
+* Get/Update remote repository code
+    `git fetch upstream`
+
+* Synchronize remote repository code to local repository
+
+```
+git checkout origin/dev
+git merge --no-ff upstream/dev
+```
+
+If the remote repository has a new branch such as `dev-1.0`, you need to synchronize this branch to your local repository
+      
+```
+git checkout -b dev-1.0 upstream/dev-1.0
+git push --set-upstream origin dev-1.0
+```
+
+* Create new branch
+```
+git checkout -b xxx origin/dev
+```
+
+Make sure that branch `xxx` builds successfully on the latest code of the official dev branch.
+* After modifying the code locally in the new branch, commit and push it to your own repository:
+  
+`git commit -m 'commit content'`
+    
+`git push origin xxx --set-upstream`
+
+* Submit changes to the remote repository
+
+* On the GitHub page, click "New pull request".
+
+* Select your modified branch and the branch you want to merge into, then click "Create pull request".
+
+* The community Committers will then do a code review and discuss some details (including design, implementation, performance, etc.) with you. When everyone on the team is satisfied with the modification, the commit will be merged into the dev branch.
+
+* Finally, congratulations, you have become an official contributor to DolphinScheduler!
+
+
diff --git a/docs/2.0.9/docs/en/contribute/join/subscribe.md b/docs/2.0.9/docs/en/contribute/join/subscribe.md
new file mode 100644
index 0000000..f6e8a74
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/subscribe.md
@@ -0,0 +1,23 @@
+# Subscribe Mailing Lists
+
+It is highly recommended to subscribe to the development mailing list to keep up-to-date with the community.
+
+In the process of using DolphinScheduler, if you have any questions, ideas, or suggestions, you can participate in building the DolphinScheduler community through the Apache mailing list. Sending a subscription email is also very simple; the steps are as follows:
+
+1. Send an email to dev-subscribe@dolphinscheduler.apache.org from your own email address; the subject and content are arbitrary.
+
+2. Receive the confirmation email and reply. After completing step 1, you will receive a confirmation email from dev-help@dolphinscheduler.apache.org (if you do not receive it, please check whether the email has been automatically classified as spam, promotion, subscription, etc.). Then reply directly to the email, or click the link in the email to reply quickly; the subject and content are arbitrary.
+
+3. Receive a welcome email. After completing the above steps, you will receive a welcome email with the subject WELCOME to dev@dolphinscheduler.apache.org, and you have successfully subscribed to the Apache DolphinScheduler mailing list.
+
+# Unsubscribe Mailing Lists
+
+If you do not need to know what's going on with DolphinScheduler, you can unsubscribe from the mailing list.
+
+The steps to unsubscribe from the mailing list are as follows:
+
+1. Send an email to dev-unsubscribe@dolphinscheduler.apache.org from your subscribed email address; the subject and content are arbitrary.
+
+2. Receive the confirmation email and reply. After completing step 1, you will receive a confirmation email from dev-help@dolphinscheduler.apache.org (if you do not receive it, please check whether the email has been automatically classified as spam, promotion, subscription, etc.). Then reply directly to the email, or click the link in the email to reply quickly; the subject and content are arbitrary.
+
+3. Receive a goodbye email. After completing the above steps, you will receive a goodbye email with the subject GOODBYE from dev@dolphinscheduler.apache.org, which means you have successfully unsubscribed from the Apache DolphinScheduler mailing list and will no longer receive emails from dev@dolphinscheduler.apache.org.
diff --git a/docs/2.0.9/docs/en/contribute/join/unit-test.md b/docs/2.0.9/docs/en/contribute/join/unit-test.md
new file mode 100644
index 0000000..796cf59
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/join/unit-test.md
@@ -0,0 +1,118 @@
+## Unit Test Coverage
+
+### 1. The Benefits of Writing Unit Tests
+
+-    Unit tests help everyone to get into the details of the code and understand how it works.
+-    Through test cases we can find bugs and submit robust code.
+-    The test case is also a demo usage of the code.
+
+### 2. Some design principles for unit test cases
+
+-    The steps, granularity and combination of conditions should be carefully designed.
+-    Pay attention to boundary conditions.
+-    Unit tests should be well designed as well as avoiding useless code.
+-    When you find that a `method` is difficult to write a unit test for, and you confirm that the `method` is `bad code`, refactor it together with the developer.
+<!-- markdown-link-check-disable -->
+-    DolphinScheduler: [mockito](http://site.mockito.org/). Here are some development guides: [mockito tutorial](http://www.baeldung.com/bdd-mockito), [mockito refcard](https://dzone.com/refcardz/mockito)
+<!-- markdown-link-check-enable -->
+-    TDD(option): When you start writing a new feature, you can try writing test cases first.
+
+### 3. Test coverage target
+
+-    At this stage, the default test coverage requirement for changed (delta) code is >= 60%; the higher the better.
+-    We can see the test reports on this page:  https://codecov.io/gh/apache/dolphinscheduler
+
+## Fundamental guidelines for unit test
+
+### 1. Isolation and singleness
+
+A test case should be accurate to the method level, and it should be possible to execute the test case alone. At the same time the focus is always on the method (only the method is tested).
+
+If the method is too complex, it should be split up during the development phase. For test cases, it is best that each case focuses on only one branch (condition), so that changes affect the success of only one test case. This greatly facilitates verifying issues and solving problems during the development phase. At the same time, however, it also poses a great challenge in terms of coverage.
+
+### 2. Automaticity
+
+Unit tests can be automated. Mandatory: all unit tests must be written under src/test. Also the method naming should conform to the specification. Benchmark tests are excluded.
+
+### 3. Reproducibility
+
+Multiple executions (any environment, any time) result in unique and repeatable results.
+
+### 4. Lightweight
+
+That is, the tests can be run quickly in any environment.
+
+This requires that we do not rely on too many components, such as various Spring beans and the like. These should all be mocked in unit tests; pulling them in would slow down the execution of our unit tests and could also propagate contamination between tests.
+
+For databases and other external components, mock the client as far as possible so that it does not depend on the external environment (the presence of any external dependency greatly limits the portability and stability of test cases and the correctness of results); this also makes it easy for developers to test in any environment.
+
+### 5. Measurable
+
+Over the years, Mockito has grown to be the No. 1 mocking framework, but it still doesn't support mocking static methods, constructors, etc. Even its website keeps saying: "Don't mock everything". So use static methods as little as possible.
+
+It is generally recommended to provide static methods only in some utility classes, in which case you don't need mocks and just use real classes. If the dependent class is not a utility class, static methods can be refactored into instance methods. This is more in line with the object-oriented design concept.
+
+### 6. Completeness
+
+Test coverage is a difficult problem. For core processes, we hope to achieve 90% coverage; for non-core processes, more than 60% is required.
+
+High enough coverage reduces the probability of bugs and also reduces the cost of our regression tests. This is a long-term process, and whenever developers add or modify code, test cases need to be refined at the same time. We hope developers and the relevant code reviewers will pay enough attention to this point.
+
+### 7. Refuse invalid assertions
+
+Invalid assertions make the test itself meaningless: they have little to do with whether your code is correct, and they risk creating an illusion of success that may last until your code is deployed to production.
+
+There are several types of invalid assertions:
+
+1.   Comparisons between different types.
+
+2.   Asserting that an object or variable which has a default value is not null.
+
+     This is meaningless. Therefore, when making such checks you should pay attention to whether the value has a default of its own.
+
+3.   Assertions should be affirmative rather than negative where possible, and should check a range of predicted results or an exact value whenever possible (otherwise you may end up with something that does not match your actual expectations but still passes the assertion), unless your code only cares about whether the value is empty or not. A sketch follows below.
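+
+The following is a minimal sketch of the difference between an assertion that can never fail and one that checks the real expectation; the `RetryConfig` class and its values are purely illustrative.
+
+```java
+import org.junit.Assert;
+import org.junit.Test;
+
+public class RetryConfigTest {
+
+    // Hypothetical class under test: the retry count is a primitive int.
+    static class RetryConfig {
+        int getRetryCount() {
+            return 3;
+        }
+    }
+
+    @Test
+    public void retryCountShouldBeConfiguredValue() {
+        int retryCount = new RetryConfig().getRetryCount();
+
+        // Invalid: the primitive is auto-boxed and can never be null,
+        // so this assertion always passes and proves nothing.
+        Assert.assertNotNull(retryCount);
+
+        // Meaningful: assert the exact value we expect.
+        Assert.assertEquals(3, retryCount);
+    }
+}
+```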
+
+### 8. Some points to note for unit tests
+
+1: Thread.sleep()
+
+Try not to use Thread.sleep in your test code; it makes tests unstable and they may fail unexpectedly depending on the environment or load. The following approach is recommended instead:
+
+`Awaitility.await().atMost(...)`
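+
+A minimal sketch of this pattern, assuming the `org.awaitility:awaitility` test dependency is on the classpath (the asynchronous work here is only illustrative):
+
+```java
+import static org.awaitility.Awaitility.await;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.junit.Test;
+
+public class AsyncTaskTest {
+
+    private final AtomicBoolean done = new AtomicBoolean(false);
+
+    @Test
+    public void completesWithoutSleeping() {
+        // Illustrative asynchronous work that flips the flag when finished.
+        new Thread(() -> done.set(true)).start();
+
+        // Poll the condition instead of calling Thread.sleep.
+        await().atMost(5, TimeUnit.SECONDS).until(done::get);
+    }
+}
+```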
+
+2: Ignore some test classes
+
+The @Ignore annotation should be linked to the relevant issue address so that subsequent developers can track the history of why the test was ignored.
+
+For example, `@Ignore("see #1")`.
+
+3: try-catch in unit test exceptions
+
+A test will fail when the code in the unit test throws an exception, so there is no need to use try-catch to catch exceptions.
+
+```java
+@Test
+public void testMethod() {
+    try {
+        // Some code
+    } catch (MyException e) {
+        Assert.fail(e.getMessage());  // Noncompliant
+    }
+}
+```
+
+You should do this instead:
+
+```java
+@Test
+public void testMethod() throws MyException {
+    // Some code
+}
+```
+
+4: Test exceptions
+
+When you need to test for an exception, avoid including multiple method invocations in your test code (especially when several of them can raise the same exception), and clearly state what you are testing for, as sketched below.
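+
+A minimal sketch of this rule, assuming JUnit 4.13+ (which provides `Assert.assertThrows`); the `Parser` class is purely illustrative:
+
+```java
+import static org.junit.Assert.assertThrows;
+
+import org.junit.Test;
+
+public class ParserTest {
+
+    // Hypothetical class under test.
+    static class Parser {
+        String parse(String input) {
+            if (input == null || !input.startsWith("{")) {
+                throw new IllegalArgumentException("not a JSON object: " + input);
+            }
+            return input;
+        }
+    }
+
+    @Test
+    public void parseShouldRejectMalformedInput() {
+        Parser parser = new Parser();
+        parser.parse("{\"valid\": true}");  // setup calls stay outside the assertion
+
+        // Only the single call expected to throw goes inside the lambda, so the test
+        // cannot pass just because some other call threw the same exception type.
+        assertThrows(IllegalArgumentException.class, () -> parser.parse("not-json"));
+    }
+}
+```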
+
+5: Refuse to use MockitoJUnitRunner.Silent.class
+
+When an UnnecessaryStubbingException occurs in a unit test, do not reach for @RunWith(MockitoJUnitRunner.Silent.class) to resolve it. That only hides the problem; instead, follow the hint in the exception and remove the unnecessary stubbing, which is not difficult. Once the change is done, you will find that your code is much cleaner again, as in the sketch below.
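+
+A minimal, self-contained sketch of the strict-runner setup that surfaces the exception (the `TaskDao` and `Worker` types below are defined inline purely for illustration):
+
+```java
+import static org.mockito.Mockito.when;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+// Keep the strict runner: it is the one that reports UnnecessaryStubbingException.
+@RunWith(MockitoJUnitRunner.class)
+public class WorkerTest {
+
+    // Hypothetical collaborator and subject, defined here only to keep the sketch self-contained.
+    interface TaskDao {
+        int countTasks();
+    }
+
+    static class Worker {
+        private final TaskDao dao;
+        Worker(TaskDao dao) { this.dao = dao; }
+        int countTasks() { return dao.countTasks(); }
+    }
+
+    @Mock
+    private TaskDao taskDao;
+
+    @Test
+    public void shouldCountTasks() {
+        when(taskDao.countTasks()).thenReturn(2);
+        // A stub that no test path ever uses would trigger UnnecessaryStubbingException here;
+        // the fix is to delete that stub, not to switch to MockitoJUnitRunner.Silent.
+
+        Assert.assertEquals(2, new Worker(taskDao).countTasks());
+    }
+}
+```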
diff --git a/docs/2.0.9/docs/en/contribute/release/release-post.md b/docs/2.0.9/docs/en/contribute/release/release-post.md
new file mode 100644
index 0000000..e1d63dc
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/release/release-post.md
@@ -0,0 +1,32 @@
+# Release Post
+
+We still have some publishing tasks to do after we send the announcement mail; currently we have to publish Docker images to
+Docker Hub and also publish pydolphinscheduler to PyPI.
+
+## Publish Docker Image
+
+We already have an existing CI workflow that publishes the latest Docker image to the GitHub Container Registry with this [config](https://github.com/apache/dolphinscheduler/blob/d80cf21456265c9d84e642bdb4db4067c7577fc6/.github/workflows/publish-docker.yaml#L55-L63).
+We can reuse the main command the CI runs and publish our Docker images to Docker Hub with a single command.
+
+```bash
+# Please change the <VERSION> placeholder to the version you release
+./mvnw -B clean deploy \
+    -Dmaven.test.skip \
+    -Dmaven.javadoc.skip \
+    -Dmaven.checkstyle.skip \
+    -Dmaven.deploy.skip \
+    -Ddocker.tag=<VERSION> \
+    -Ddocker.hub=apache \
+    -Pdocker,release
+```
+
+## Publish pydolphinscheduler to PyPI
+
+The Python API needs to be released to PyPI for easier download and use. You can see more detail in [Python API release](https://github.com/apache/dolphinscheduler/blob/2.0.9/dolphinscheduler-python/pydolphinscheduler/RELEASE.md#to-pypi)
+on how to finish the PyPI release.
+
+## Get All Contributors
+
+You might need the list of all contributors for the current release when you publish the release news or announcement; you can
+use the git command `git log --pretty="%an" <PREVIOUS-RELEASE-SHA>..<CURRENT-RELEASE-SHA> | sort | uniq` to generate
+the list of git author names.
diff --git a/docs/2.0.9/docs/en/contribute/release/release-prepare.md b/docs/2.0.9/docs/en/contribute/release/release-prepare.md
new file mode 100644
index 0000000..fe51973
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/release/release-prepare.md
@@ -0,0 +1,31 @@
+# Release Preparation
+
+## Check release-docs
+
+Compared with the last release, the `release-docs` of the current release need to be updated to the latest if there are dependency or version changes:
+
+ - `dolphinscheduler-dist/release-docs/LICENSE`
+ - `dolphinscheduler-dist/release-docs/NOTICE`
+ - `dolphinscheduler-dist/release-docs/licenses`
+
+## Update Version
+
+For example, to release `x.y.z`, the following updates are required:
+
+- Version in the code:
+  - `sql`:
+    - `dolphinscheduler_mysql.sql`: `t_ds_version` needs to be updated to x.y.z
+    - `dolphinscheduler_postgre.sql`: `t_ds_version` needs to be updated to x.y.z
+    - `dolphinscheduler_h2.sql`: `t_ds_version` needs to be updated to x.y.z
+    - `upgrade`: whether to add `x.y.z_schema`
+    - `soft_version`: need to be updated to x.y.z
+  - `deploy/docker/.env`: change `HUB` to `apache` and `TAG` to `x.y.z`
+  - `deploy/kubernetes/dolphinscheduler`:
+    - `Chart.yaml`: `appVersion` needs to be updated to x.y.z (`version` is the helm chart version, incremented and different from x.y.z)
+    - `values.yaml`: `image.tag` needs to be updated to x.y.z
+  - `dolphinscheduler-python/pydolphinscheduler/setup.py`: change `version` to x.y.z
+- Version in the docs:
+  - Change the placeholder `<version>` (except in `pom`) to `x.y.z` in the `docs` directory
+  - Add new history version
+    - `docs/docs/en/history-versions.md` and `docs/docs/zh/history-versions.md`: Add the new version and link for `x.y.z`
+  - `docs/configs/docsdev.js`: change `/dev/` to `/x.y.z/`
diff --git a/docs/2.0.9/docs/en/contribute/release/release.md b/docs/2.0.9/docs/en/contribute/release/release.md
new file mode 100644
index 0000000..cf5190d
--- /dev/null
+++ b/docs/2.0.9/docs/en/contribute/release/release.md
@@ -0,0 +1,540 @@
+# Release Guide
+
+## Check Your Environment
+
+To make sure you can successfully complete the release for DolphinScheduler, you should check your environment and make sure
+all requirements are met; if any of them are missing, you should install them and make sure they work.
+
+```shell
+# JDK 1.8 or above is required
+java -version
+# Maven is required
+mvn -version
+# Python 3.6 or above is required, and the keyword `python` has to work in your terminal and match that version
+python --version
+```
+
+## GPG Settings
+
+### Install GPG
+
+Download the installation package from the [official GnuPG website](https://www.gnupg.org/download/index.html).
+The commands of GnuPG 1.x can differ a little from those of 2.x.
+The following instructions take `GnuPG-2.1.23` as an example.
+
+After the installation, execute the following command to check the version number.
+
+```shell
+gpg --version
+```
+
+### Create Key
+
+After the installation, execute the following command to create key.
+
+For `GnuPG-2.x`, use this command:
+
+```shell
+gpg --full-gen-key
+```
+
+For `GnuPG-1.x`, use this command:
+
+```shell
+gpg --gen-key
+```
+
+Finish the key creation according to the instructions. **Notice: please use your Apache email and its password for the key creation.**
+
+```shell
+gpg (GnuPG) 2.0.12; Copyright (C) 2009 Free Software Foundation, Inc.
+This is free software: you are free to change and redistribute it.
+There is NO WARRANTY, to the extent permitted by law.
+
+Please select what kind of key you want:
+  (1) RSA and RSA (default)
+  (2) DSA and Elgamal
+  (3) DSA (sign only)
+  (4) RSA (sign only)
+Your selection? 1
+RSA keys may be between 1024 and 4096 bits long.
+What keysize do you want? (2048) 4096
+Requested keysize is 4096 bits
+Please specify how long the key should be valid.
+        0 = key does not expire
+     <n>  = key expires in n days
+     <n>w = key expires in n weeks
+     <n>m = key expires in n months
+     <n>y = key expires in n years
+Key is valid for? (0)
+Key does not expire at all
+Is this correct? (y/N) y
+
+GnuPG needs to construct a user ID to identify your key.
+
+Real name: ${Input username}
+Email address: ${Input email}
+Comment: ${Input comment}
+You selected this USER-ID:
+   "${Inputed username} (${Inputed comment}) <${Inputed email}>"
+
+Change (N)ame, (C)omment, (E)mail or (O)kay/(Q)uit? O
+You need a Passphrase to protect your secret key. # Input your Apache mail passwords
+```
+
+### Check Generated Key
+
+```shell
+gpg --list-keys
+```
+
+Execution Result:
+
+```shell
+pub   4096R/85E11560 2019-11-15
+uid                  ${Username} (${Comment}) <{Email}>
+sub   4096R/A63BC462 2019-11-15
+```
+
+Among them, 85E11560 is the public key ID.
+
+### Upload the Public Key to Key Server
+
+The command is as follows:
+
+```shell
+gpg --keyserver hkp://pool.sks-keyservers.net --send-key 85E11560
+```
+
+`pool.sks-keyservers.net` is randomly chosen from the [public key servers](https://sks-keyservers.net/status/).
+Each server automatically synchronizes with the others, so it is okay to choose any one of them. A backup key server
+command is `gpg --keyserver hkp://keyserver.ubuntu.com --send-key <YOUR_KEY_ID>`.
+
+## Apache Maven Central Repository Release
+
+### Set `settings-security.xml` and `settings.xml`
+
+In this section, we add the Apache server Maven configuration to prepare the release. We first have to add `settings-security.xml` according
+to [this guide](http://maven.apache.org/guides/mini/guide-encryption.html) and then change your `~/.m2/settings.xml` as below
+
+```xml
+<settings>
+  <servers>
+    <server>
+      <id>apache.snapshots.https</id>
+      <username> <!-- APACHE LDAP username --> </username>
+      <password> <!-- APACHE LDAP encrypted password --> </password>
+    </server>
+    <server>
+      <id>apache.releases.https</id>
+      <username> <!-- APACHE LDAP username --> </username>
+      <password> <!-- APACHE LDAP encrypted password --> </password>
+    </server>
+  </servers>
+</settings>
+```
+
+### Set Release in Environment
+
+We will use the release version, your GitHub username and your Apache username several times below, so it is better to store
+them in bash variables for easier use.
+
+```shell
+VERSION=<THE-VERSION-YOU-RELEASE>
+GH_USERNAME=<YOUR-GITHUB-USERNAME>
+A_USERNAME=<YOUR-APACHE-USERNAME>
+```
+
+> Note: After setting these variables we can use them directly in bash without changing anything. For example, we
+> can use the command `git clone -b "${VERSION}"-prepare https://github.com/apache/dolphinscheduler.git` to clone the release branch,
+> and it works because bash expands `"${VERSION}"` to `<THE-VERSION-YOU-RELEASE>`. But you have to change `<VERSION>` manually in
+> the non-bash steps such as the [vote mail](#vote-procedure); we use `<VERSION>` instead of `"${VERSION}"` to remind the release
+> manager that it has to be changed by hand.
+
+### Create Release Branch
+
+In this section, we download the source code from GitHub and create a new branch for the release
+
+```shell
+git clone -b "${VERSION}"-prepare https://github.com/apache/dolphinscheduler.git
+cd ~/dolphinscheduler/
+git pull
+git checkout -b "${VERSION}"-release
+git push origin "${VERSION}"-release
+```
+
+### Pre-Release Check
+
+```shell
+# make sure the gpg command can be run correctly by maven
+export GPG_TTY=$(tty)
+
+mvn release:prepare -Prelease,python -Darguments="-Dmaven.test.skip=true -Dcheckstyle.skip=true -Dmaven.javadoc.skip=true" -DautoVersionSubmodules=true -DdryRun=true -Dusername="${GH_USERNAME}"
+```
+
+* `-Prelease,python`: choose the release and python profiles, which will package all the source code, jar files, executable binary packages, and the Python distribution package.
+* `-DautoVersionSubmodules=true`: the version number only needs to be entered once, not once for each sub-module.
+* `-DdryRun=true`: dry run, which means the new version number and new tag are not generated or submitted.
+
+### Prepare for the Release
+
+First, clean local pre-release check information.
+
+```shell
+mvn release:clean
+```
+
+Then, prepare to execute the release.
+
+```shell
+mvn release:prepare -Prelease,python -Darguments="-Dmaven.test.skip=true -Dcheckstyle.skip=true -Dmaven.javadoc.skip=true" -DautoVersionSubmodules=true -DpushChanges=false -Dusername="${GH_USERNAME}"
+```
+
+It is basically the same as the previous dry-run command, but with the `-DdryRun=true` parameter removed.
+
+* `-DpushChanges=false`: do not push the edited version number and tag to GitHub automatically.
+
+> Note: You have to configure your git `user.name` and `user.email` with the commands `git config --global user.email "you@example.com"`
+> and `git config --global user.name "Your Name"` if you get an error like **Please tell me who you are.**
+> from git.
+
+After making sure there is no mistake in local files, submit them to GitHub.
+
+```shell
+git push -u origin "${VERSION}"-release
+git push origin --tags
+```
+
+<!-- markdown-link-check-disable -->
+
+> Note1: In this step, you should use a GitHub token as the password because native passwords are no longer supported; see
+> https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token for more
+> detail about how to create a token.
+
+> Note2: After the command is done, it will auto-create a `release.properties` file and `*.Backup` files; they will be needed
+> by the following commands, so DO NOT DELETE THEM.
+
+<!-- markdown-link-check-enable -->
+
+### Deploy the Release
+
+```shell
+mvn release:perform -Prelease,python -Darguments="-Dmaven.test.skip=true -Dcheckstyle.skip=true -Dmaven.javadoc.skip=true" -DautoVersionSubmodules=true -Dusername="${GH_USERNAME}"
+```
+
+After that command is executed, the version to be released will be uploaded to the Apache staging repository automatically.
+Go to [apache staging repositories](https://repository.apache.org/#stagingRepositories) and log in with Apache LDAP; then you can see the uploaded version. The content of the `Repository` column is the `${STAGING.REPOSITORY}`.
+Click `Close` to tell Nexus that the construction is finished, because only then is this version usable.
+If there is any problem with the gpg signature, `Close` will fail, but you can see the failure information through `Activity`.
+
+## Apache SVN Repository Release
+
+### Checkout dolphinscheduler Release Directory
+
+If there is no local work directory, create one at first.
+
+```shell
+mkdir -p ~/ds_svn/dev/
+cd ~/ds_svn/dev/
+```
+
+After the creation, checkout dolphinscheduler release directory from Apache SVN.
+
+```shell
+svn --username="${A_USERNAME}" co https://dist.apache.org/repos/dist/dev/dolphinscheduler
+cd ~/ds_svn/dev/dolphinscheduler
+```
+
+### Add gpg Public Key
+
+Only an account doing its first deployment needs to add this.
+It is alright for `KEYS` to include only the public key of the deploying account.
+
+```shell
+gpg -a --export <YOUR-GPG-KEY-ID> >> KEYS
+```
+
+### Add the Release Content to SVN Directory
+
+Create folder by version number.
+
+```shell
+mkdir -p ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+mkdir -p ~/ds_svn/dev/dolphinscheduler/"${VERSION}"/python
+cd ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+```
+
+Add source code packages, binary packages and executable binary packages to SVN working directory.
+
+```shell
+# Source and binary tarball for main code
+cp -f ~/dolphinscheduler/dolphinscheduler-dist/target/*.tar.gz ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+cp -f ~/dolphinscheduler/dolphinscheduler-dist/target/*.tar.gz.asc ~/ds_svn/dev/dolphinscheduler/"${VERSION}"
+
+# Source and binary tarball for Python API
+cp -f ~/dolphinscheduler/dolphinscheduler-dist/target/python/* ~/ds_svn/dev/dolphinscheduler/"${VERSION}"/python
+```
+
+### Generate sign files
+
+```shell
+shasum -a 512 apache-dolphinscheduler-"${VERSION}"-src.tar.gz >> apache-dolphinscheduler-"${VERSION}"-src.tar.gz.sha512
+shasum -b -a 512 apache-dolphinscheduler-"${VERSION}"-bin.tar.gz >> apache-dolphinscheduler-"${VERSION}"-bin.tar.gz.sha512
+cd python
+shasum -a 512 apache-dolphinscheduler-python-"${VERSION}".tar.gz >> apache-dolphinscheduler-python-"${VERSION}".tar.gz.sha512
+shasum -b -a 512 apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl >> apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl.sha512
+cd ../
+```
+
+### Commit to Apache SVN
+
+```shell
+cd ~/ds_svn/dev/dolphinscheduler
+svn add *
+svn --username="${A_USERNAME}" commit -m "release ${VERSION}"
+```
+## Check Release
+
+### Check sha512 hash
+
+```shell
+shasum -c apache-dolphinscheduler-"${VERSION}"-src.tar.gz.sha512
+shasum -c apache-dolphinscheduler-"${VERSION}"-bin.tar.gz.sha512
+cd python
+shasum -c apache-dolphinscheduler-python-"${VERSION}".tar.gz.sha512
+shasum -c apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl.sha512
+cd ../
+```
+
+### Check gpg Signature
+
+First, import releaser's public key.
+Import KEYS from the SVN repository to your local machine. (The releaser does not need to import it again; the checking assistant needs to import it, with the username filled in as the releaser's.)
+
+```shell
+curl https://dist.apache.org/repos/dist/dev/dolphinscheduler/KEYS >> KEYS
+gpg --import KEYS
+gpg --edit-key "${A_USERNAME}"
+  > trust
+
+Please decide how far you trust this user to correctly verify other users' keys
+(by looking at passports, checking fingerprints from different sources, etc.)
+
+  1 = I don't know or won't say
+  2 = I do NOT trust
+  3 = I trust marginally
+  4 = I trust fully
+  5 = I trust ultimately
+  m = back to the main menu
+
+Your decision? 5
+
+  > save
+```
+
+Then, check the gpg signature.
+
+```shell
+gpg --verify apache-dolphinscheduler-"${VERSION}"-src.tar.gz.asc
+gpg --verify apache-dolphinscheduler-"${VERSION}"-bin.tar.gz.asc
+cd python
+gpg --verify apache-dolphinscheduler-python-"${VERSION}".tar.gz.asc
+gpg --verify apache_dolphinscheduler-python-"${VERSION}"-py3-none-any.whl.asc
+cd ../
+```
+
+> Note: You have to create the gpg signatures manually if you cannot find your `asc` files; the commands
+> `gpg --armor --detach-sign --digest-algo=SHA512 apache-dolphinscheduler-"${VERSION}"-bin.tar.gz` and
+> `gpg --armor --detach-sign --digest-algo=SHA512 apache-dolphinscheduler-"${VERSION}"-src.tar.gz` will create them.
+
+### Check Released Files
+
+#### Check source package
+
+Decompress `apache-dolphinscheduler-<VERSION>-src.tar.gz` and `python/apache-dolphinscheduler-python-<VERSION>.tar.gz` then check the following items:
+
+*   Check whether source tarball is oversized for including nonessential files
+*   `LICENSE` and `NOTICE` files exist
+*   Correct year in `NOTICE` file
+*   There are only text files, no binary files
+*   All source files have ASF headers
+*   Codes can be compiled and pass the unit tests (mvn install)
+*   The contents of the release match with what's tagged in version control (diff -r a verify_dir tag_dir)
+*   Check whether there are any extra files or folders, for example empty folders
+
+#### Check binary packages
+
+Decompress `apache-dolphinscheduler-<VERSION>-bin.tar.gz` and `python/apache-dolphinscheduler-python-<VERSION>-bin.tar.gz`
+to check the following items:
+
+- `LICENSE` and `NOTICE` files exist
+- Correct year in `NOTICE` file
+- Check the third-party dependency licenses:
+  - The software has a compatible license
+  - All software licenses are mentioned in `LICENSE`
+  - All the third-party dependency licenses are under the `licenses` folder
+  - If a dependency uses the Apache license and has a `NOTICE` file, that `NOTICE` file needs to be merged into the `NOTICE` file of the release
+
+## Call for a Vote
+
+### Update Release Notes
+
+You should create release notes in GitHub via [new release note](https://github.com/apache/dolphinscheduler/releases/new).
+This should be done before the vote mail because we need the release notes in the mail. You can use the command
+`git log --pretty="- %s" <PREVIOUS-RELEASE-SHA>..<CURRENT-RELEASE-SHA> > changelog.md` to create the changelog (some entries
+may not be correct, so filter them yourself), classify them, and paste them into the GitHub release notes page.
+
+### Vote procedure
+
+1. DolphinScheduler community vote: send the vote e-mail to `dev@dolphinscheduler.apache.org`.
+PMC members need to check the correctness of the version according to this document before they vote.
+After at least 72 hours and with at least 3 `+1 and no -1 PMC member` votes, it can proceed to the next stage of the vote.
+
+2. Announce the vote result: send the vote result e-mail to `dev@dolphinscheduler.apache.org`.
+
+### Vote Templates
+
+#### DolphinScheduler Community Vote Template
+
+Title:
+
+```txt
+[VOTE] Release Apache DolphinScheduler <VERSION>
+```
+
+Body:
+
+```txt
+Hello DolphinScheduler Community,
+
+This is a call for vote to release Apache DolphinScheduler version <VERSION>
+
+Release notes: https://github.com/apache/dolphinscheduler/releases/tag/<VERSION>
+
+The release candidates: https://dist.apache.org/repos/dist/dev/dolphinscheduler/<VERSION>/
+
+Maven 2 staging repository: https://repository.apache.org/content/repositories/<VERSION>/org/apache/dolphinscheduler/
+
+Git tag for the release: https://github.com/apache/dolphinscheduler/tree/<VERSION>
+
+Release Commit ID: https://github.com/apache/dolphinscheduler/commit/<SHA-VALUE>
+
+Keys to verify the Release Candidate: https://dist.apache.org/repos/dist/dev/dolphinscheduler/KEYS
+
+Look at here for how to verify this release candidate: https://dolphinscheduler.apache.org/en-us/community/release.html
+
+The vote will be open for at least 72 hours or until necessary number of votes are reached.
+
+Please vote accordingly:
+
+[ ] +1 approve
+[ ] +0 no opinion
+[ ] -1 disapprove with the reason
+
+Checklist for reference:
+
+[ ] Download links are valid.
+[ ] Checksums and PGP signatures are valid.
+[ ] Source code artifacts have correct names matching the current release.
+[ ] LICENSE and NOTICE files are correct for each DolphinScheduler repo.
+[ ] All files have license headers if necessary.
+[ ] No compiled archives bundled in source archive.
+```
+
+2. Announce the vote result:
+
+Body:
+
+```txt
+The vote to release Apache DolphinScheduler <VERSION> has passed. Here is the vote result,
+
+4 PMC member +1 votes:
+
+xxx
+xxx
+xxx
+xxx
+
+1 community +1 vote:
+xxx
+
+Thanks everyone for taking time to check this release and help us.
+```
+
+## Finish the Release
+
+### Move source packages, binary packages from the `dev` directory to `release` directory
+
+```shell
+svn mv https://dist.apache.org/repos/dist/dev/dolphinscheduler/"${VERSION}" https://dist.apache.org/repos/dist/release/dolphinscheduler/
+```
+
+### Export your new gpg KEYS from dev to release (optional)
+
+This is only needed the first time you release with this gpg KEY, i.e. it is your first release or you have changed your KEY.
+
+```shell
+mkdir -p ~/ds_svn/release/
+cd ~/ds_svn/release/
+svn --username="${A_USERNAME}" co https://dist.apache.org/repos/dist/release/dolphinscheduler
+gpg -a --export <YOUR-GPG-KEY-ID> >> KEYS
+svn add *
+svn --username="${A_USERNAME}" commit -m "new key <YOUR-GPG-KEY-ID> add"
+```
+
+### Update Document
+
+The website should be updated before you send the announcement mail; this section tells you how to change the website. For example,
+if the release version is `<VERSION>`, the following updates are required (note that they take effect immediately when the PR is merged):
+
+- Repository **apache/dolphinscheduler-website**:
+  - `download/en-us/download.md` and `download/zh-cn/download.md`: add the download of the `<VERSION>` release package
+  - `scripts/conf.sh`: Add new release version `<VERSION>` key-value pair to variable `DEV_RELEASE_DOCS_VERSIONS`
+- Repository **apache/dolphinscheduler**:
+  - `docs/configs/site.js`:
+    - `docsLatest`: update to `<VERSION>`
+    - `docs0`: The `text` of two places of `en-us/zh-cn` needs to be updated to `latest(<VERSION>)`
+    - `docsxyz`: Add a drop-down menu with `key` as `docsxyz` and `text` as `<VERSION>` in `children` of two places of `en-us/zh-cn`
+  - `docs/configs/index.md.jsx`: Add `<VERSION>: docsxyzConfig`
+  - `docs/docs/en/history-versions.md` and `docs/docs/zh/history-versions.md`: Add new `<VERSION>` release docs.
+  - `.github/ISSUE_TEMPLATE/bug-report.yml`: DolphinScheduler's GitHub [bug-report](https://github.com/apache/dolphinscheduler/blob/dev/.github/ISSUE_TEMPLATE/bug-report.yml)
+    issue template has a **Version** selection button, so after we release DolphinScheduler we should add the new `<VERSION>` to
+    bug-report.yml
+
+### Find DolphinScheduler in [apache staging repositories](https://repository.apache.org/#stagingRepositories) and click `Release`
+
+### Send Announcement E-mail to the Community
+
+You should send the announcement E-mail after the release process is finished. The E-mail should be sent to `dev@dolphinscheduler.apache.org`
+and cc to `announce@apache.org`.
+
+Announcement e-mail template as below:
+
+Title:
+
+```txt
+[ANNOUNCE] Release Apache DolphinScheduler <VERSION>
+```
+
+Body:
+
+```txt
+Hi all,
+
+We are glad to announce the release of Apache DolphinScheduler <VERSION>. Once again I would like to express my thanks for your help.
+
+DolphinScheduler is a distributed and easy-to-extend visual workflow scheduler system,
+dedicated to solving the complex task dependencies in data processing and making the scheduler system available out of the box for data processing.
+
+
+Download Links: https://dolphinscheduler.apache.org/en-us/download
+
+Release Notes: https://github.com/apache/dolphinscheduler/releases/tag/<VERSION>
+
+Website: https://dolphinscheduler.apache.org/
+
+DolphinScheduler Resources:
+- Issue: https://github.com/apache/dolphinscheduler/issues/
+- Mailing list: dev@dolphinscheduler.apache.org
+- Documents: https://dolphinscheduler.apache.org/zh-cn/docs/<VERSION>/user_doc/about/introduction.html
+```
diff --git a/docs/2.0.9/docs/en/guide/alert/alert_plugin_user_guide.md b/docs/2.0.9/docs/en/guide/alert/alert_plugin_user_guide.md
new file mode 100644
index 0000000..a26ec29
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/alert/alert_plugin_user_guide.md
@@ -0,0 +1,12 @@
+## How to create alert plugins and alert groups
+
+Since version 2.0.0, users need to create alert instances and then associate them with alert groups. An alert group can use multiple alert instances, and the instances are notified one by one.
+
+First, go to the Security Center and select Alarm Group Management, then click Alarm Instance Management on the left and create an alarm instance. Select the corresponding alarm plugin and fill in the relevant alarm parameters.
+
+Then select Alarm Group Management, create an alarm group, and select the corresponding alarm instance.
+
+<img src="/img/alert/alert_step_1.png">
+<img src="/img/alert/alert_step_2.png">
+<img src="/img/alert/alert_step_3.png">
+<img src="/img/alert/alert_step_4.png">
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/alert/dingtalk.md b/docs/2.0.9/docs/en/guide/alert/dingtalk.md
new file mode 100644
index 0000000..18263f9
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/alert/dingtalk.md
@@ -0,0 +1,26 @@
+# DingTalk
+
+If you need to use DingTalk for alerting, please create an alert instance in the alert instance management and select the DingTalk plugin. The configuration example of DingTalk is as follows:
+
+![dingtalk-plugin](/img/alert/dingtalk-plugin.png)
+
+parameter configuration
+
+* Webhook
+  > The format is as follows: https://oapi.dingtalk.com/robot/send?access_token=XXXXXX
+* Keyword
+  > Custom keywords for security settings
+* Secret
+  > Signature of security settings
+* MessageType
+  > Support both text and markdown types
+
+When a custom robot sends a message, you can specify the "@person list" by mobile phone number or user ID. The people in the "@person list" receive an @ message reminder; even in do-not-disturb conversations the reminder is still shown, and "someone @ you" appears on the folded conversation.
+* @Mobiles
+  > The mobile phone number of the "@person"
+* @UserIds
+  > The user ID of the "@person"
+* @All
+  > Whether to @ everyone
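+
+For a quick test of the webhook outside DolphinScheduler, a request along these lines can be used (a sketch; the access token, message content and phone number are illustrative, and the message must contain the configured keyword if keyword security is enabled):
+
+```shell
+# send a test message to the DingTalk custom robot webhook
+curl -s -H 'Content-Type: application/json' \
+  -d '{"msgtype":"text","text":{"content":"DolphinScheduler: test alert"},"at":{"atMobiles":["138xxxxxxxx"],"isAtAll":false}}' \
+  'https://oapi.dingtalk.com/robot/send?access_token=XXXXXX'
+```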
+
+[DingTalk Custom Robot Access Development Documentation](https://open.dingtalk.com/document/robots/custom-robot-access)
diff --git a/docs/2.0.9/docs/en/guide/alert/enterprise-wechat.md b/docs/2.0.9/docs/en/guide/alert/enterprise-wechat.md
new file mode 100644
index 0000000..2baea45
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/alert/enterprise-wechat.md
@@ -0,0 +1,13 @@
+# Enterprise WeChat
+
+If you need to use Enterprise WeChat for alerting, please create an alert instance in the alert instance management and select the Enterprise WeChat plugin. The configuration example of Enterprise WeChat is as follows:
+
+![enterprise-wechat-plugin](/img/alert/enterprise-wechat-plugin.png)
+
+The send type corresponds to app and appchat respectively:
+
+APP: https://work.weixin.qq.com/api/doc/90000/90135/90236
+
+APPCHAT: https://work.weixin.qq.com/api/doc/90000/90135/90248
+
+`user.send.msg` corresponds to the message content described in the documents above; the variable that carries the alert content is `{msg}`.
diff --git a/docs/2.0.9/docs/en/guide/datasource/hive.md b/docs/2.0.9/docs/en/guide/datasource/hive.md
new file mode 100644
index 0000000..20d86d8
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/datasource/hive.md
@@ -0,0 +1,42 @@
+# HIVE
+
+## Use HiveServer2
+
+ <p align="center">
+    <img src="/img/hive-en.png" width="80%" />
+  </p>
+
+- Data source: select HIVE
+- Data source name: enter the name of the data source
+- Description: Enter a description of the data source
+- IP/Host Name: Enter the IP connected to HIVE
+- Port: Enter the port connected to HIVE
+- Username: Set the username for connecting to HIVE
+- Password: Set the password for connecting to HIVE
+- Database name: Enter the name of the database connected to HIVE
+- Jdbc connection parameters: parameter settings for the HIVE connection, filled in JSON format
+
+> NOTICE: If you wish to execute multiple HIVE SQL statements in the same session, you can set `support.hive.oneSession = true`
+> in `common.properties`. This is helpful when you need to set environment variables before running HIVE SQL. The parameter
+> `support.hive.oneSession` defaults to `false`, and multiple SQL statements run in different sessions.
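+
+A minimal sketch of enabling it on a worker (the installation path is illustrative, adjust it to your conf directory):
+
+```shell
+# allow multiple HIVE SQL statements to run in one session
+echo "support.hive.oneSession=true" >> /opt/dolphinscheduler/conf/common.properties
+```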
+
+## Use HiveServer2 HA Zookeeper
+
+ <p align="center">
+    <img src="/img/hive1-en.png" width="80%" />
+  </p>
+Note: If Kerberos is disabled, ensure that the parameter `hadoop.security.authentication.startup.state` is `false` and the parameter `java.security.krb5.conf.path` is null or empty. If **Kerberos** is enabled, configure the following parameters in `common.properties`:
+
+```conf
+# whether to startup kerberos
+hadoop.security.authentication.startup.state=true
+
+# java.security.krb5.conf path
+java.security.krb5.conf.path=/opt/krb5.conf
+
+# login user from keytab username
+login.user.keytab.username=hdfs-mycluster@ESZ.COM
+
+# login user from keytab path
+login.user.keytab.path=/opt/hdfs.headless.keytab
+```
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/datasource/introduction.md b/docs/2.0.9/docs/en/guide/datasource/introduction.md
new file mode 100644
index 0000000..c112812
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/datasource/introduction.md
@@ -0,0 +1,7 @@
+
+# Data Source
+
+The data source center supports MySQL, POSTGRESQL, HIVE/IMPALA, SPARK, CLICKHOUSE, ORACLE, SQLSERVER and other data sources.
+
+- Click "Data Source Center -> Create Data Source" to create different types of data sources according to requirements.
+- Click "Test Connection" to test whether the data source can be successfully connected.
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/datasource/mysql.md b/docs/2.0.9/docs/en/guide/datasource/mysql.md
new file mode 100644
index 0000000..7807a00
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/datasource/mysql.md
@@ -0,0 +1,16 @@
+# MySQL
+
+
+- Data source: select MYSQL
+- Data source name: enter the name of the data source
+- Description: Enter a description of the data source
+- IP hostname: enter the IP to connect to MySQL
+- Port: Enter the port to connect to MySQL
+- Username: Set the username for connecting to MySQL
+- Password: Set the password for connecting to MySQL
+- Database name: Enter the name of the database connected to MySQL
+- Jdbc connection parameters: parameter settings for the MySQL connection, filled in JSON format
+
+<p align="center">
+   <img src="/img/mysql-en.png" width="80%" />
+ </p>
diff --git a/docs/2.0.9/docs/en/guide/datasource/postgresql.md b/docs/2.0.9/docs/en/guide/datasource/postgresql.md
new file mode 100644
index 0000000..77a4fd7
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/datasource/postgresql.md
@@ -0,0 +1,15 @@
+# POSTGRESQL
+
+- Data source: select POSTGRESQL
+- Data source name: enter the name of the data source
+- Description: Enter a description of the data source
+- IP/Host Name: Enter the IP to connect to POSTGRESQL
+- Port: Enter the port to connect to POSTGRESQL
+- Username: Set the username for connecting to POSTGRESQL
+- Password: Set the password for connecting to POSTGRESQL
+- Database name: Enter the name of the database connected to POSTGRESQL
+- Jdbc connection parameters: parameter settings for the POSTGRESQL connection, filled in JSON format
+
+<p align="center">
+   <img src="/img/postgresql-en.png" width="80%" />
+ </p>
diff --git a/docs/2.0.9/docs/en/guide/datasource/spark.md b/docs/2.0.9/docs/en/guide/datasource/spark.md
new file mode 100644
index 0000000..ebdff80
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/datasource/spark.md
@@ -0,0 +1,15 @@
+# Spark
+
+<p align="center">
+   <img src="/img/spark-en.png" width="80%" />
+ </p>
+
+- Data source: select Spark
+- Data source name: enter the name of the data source
+- Description: Enter a description of the data source
+- IP/Hostname: Enter the IP connected to Spark
+- Port: Enter the port connected to Spark
+- Username: Set the username for connecting to Spark
+- Password: Set the password for connecting to Spark
+- Database name: Enter the name of the database connected to Spark
+- Jdbc connection parameters: parameter settings for the Spark connection, filled in JSON format
diff --git a/docs/2.0.9/docs/en/guide/expansion-reduction.md b/docs/2.0.9/docs/en/guide/expansion-reduction.md
new file mode 100644
index 0000000..99e32f1
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/expansion-reduction.md
@@ -0,0 +1,251 @@
+<!-- markdown-link-check-disable -->
+
+# DolphinScheduler Expansion and Reduction
+
+## 1. Expansion 
+This article describes how to add a new master service or worker service to an existing DolphinScheduler cluster.
+```
+ Attention: There cannot be more than one master service process or worker service process on a physical machine.
+       If the physical machine where the new master or worker node is located already has the scheduling service installed, skip to [1.4 Modify configuration], edit the configuration file `conf/config/install_config.conf` on **all** nodes, add the masters or workers parameter, and restart the scheduling cluster.
+```
+
+### 1.1 Basic software installation (please install the mandatory items yourself)
+
+* [required] [JDK](https://www.oracle.com/technetwork/java/javase/downloads/index.html) (1.8+): must be installed; please install it and configure the JAVA_HOME and PATH variables under /etc/profile (see the example below)
+* [optional] If the expansion is a worker node, you need to consider whether to install an external client, such as Hadoop, Hive, Spark Client.
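+
+A minimal example of the `/etc/profile` additions (the JDK path is illustrative, adjust it to your installation):
+
+```shell
+export JAVA_HOME=/usr/local/jdk1.8.0_301
+export PATH=$JAVA_HOME/bin:$PATH
+```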
+
+
+```markdown
+ Attention: DolphinScheduler itself does not depend on Hadoop, Hive, Spark, but will only call their Client for the corresponding task submission.
+```
+
+### 1.2 Get installation package
+- Check which version of DolphinScheduler is used in your existing environment and get the installation package of the corresponding version; if the versions differ, there may be compatibility problems.
+- Confirm the unified installation directory of the other nodes. This article assumes that DolphinScheduler is installed in the /opt/ directory and the full path is /opt/dolphinscheduler.
+- Download the corresponding version of the installation package to the server installation directory, uncompress it, rename it to dolphinscheduler and store it in the /opt directory.
+- Add the database dependency package. This article uses a MySQL database, so add the mysql-connector-java driver package to the /opt/dolphinscheduler/lib directory (see the example after the commands below).
+```shell
+# create the installation directory, please do not create the installation directory in /root, /home and other high privilege directories 
+mkdir -p /opt
+cd /opt
+# decompress
+tar -zxvf apache-dolphinscheduler-2.0.9-bin.tar.gz -C /opt 
+cd /opt
+mv apache-dolphinscheduler-2.0.9-bin  dolphinscheduler
+```
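+
+For the MySQL driver mentioned above, the copy could look like this (the jar file name is illustrative, use the driver version that matches your MySQL server):
+
+```shell
+# put the MySQL JDBC driver into the lib directory of the new node
+cp mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
+```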
+
+```markdown
+ Attention: The installation package can be copied directly from an existing environment to an expanded physical machine for use.
+```
+
+### 1.3 Create Deployment Users
+
+- Create a deployment user on **all** expansion machines, and be sure to configure passwordless sudo. If we plan to deploy the scheduler on four expansion machines, ds1, ds2, ds3 and ds4, we first need to create a deployment user on each machine
+
+```shell
+# to create a user, you need to log in with root and set the deployment user name, please modify it yourself, later take dolphinscheduler as an example
+useradd dolphinscheduler;
+
+# set the user password, please change it by yourself, later take dolphinscheduler123 as an example
+echo "dolphinscheduler123" | passwd --stdin dolphinscheduler
+
+# configure sudo password-free
+echo 'dolphinscheduler  ALL=(ALL)  NOPASSWD: ALL' >> /etc/sudoers
+sed -i 's/Defaults    requiretty/#Defaults    requiretty/g' /etc/sudoers
+
+```
+
+```markdown
+ Attention:
+ - Since `sudo -u {linux-user}` is used to switch between Linux users to run multi-tenant jobs, the deployment user needs to have sudo privileges without a password.
+ - If you find the line "Defaults requiretty" in the /etc/sudoers file, please also comment it out.
+ - If resource uploads are used, you also need to assign read and write permissions to the deployment user on `HDFS or MinIO`.
+```
+
+### 1.4 Modify configuration
+
+- From an existing node such as Master/Worker, copy the conf directory directly to replace the conf directory in the new node. After copying, check if the configuration items are correct.
+    
+    ```markdown
+    Highlights:
+    datasource.properties: database connection information 
+    zookeeper.properties: information for connecting zk 
+    common.properties: Configuration information about the resource store (if hadoop is set up, please check if the core-site.xml and hdfs-site.xml configuration files exist).
+    env/dolphinscheduler_env.sh: environment Variables
+    ```
+
+- Modify the environment variables in `dolphinscheduler_env.sh` in the conf/env directory according to the machine configuration (this example assumes the software is installed under /opt/soft)
+
+    ```shell
+        export HADOOP_HOME=/opt/soft/hadoop
+        export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
+        # export SPARK_HOME1=/opt/soft/spark1
+        export SPARK_HOME2=/opt/soft/spark2
+        export PYTHON_HOME=/opt/soft/python
+        export JAVA_HOME=/opt/soft/java
+        export HIVE_HOME=/opt/soft/hive
+        export FLINK_HOME=/opt/soft/flink
+        export DATAX_HOME=/opt/soft/datax/bin/datax.py
+        export PATH=$HADOOP_HOME/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH
+    
+    ```
+
+    `Attention: This step is very important. For example, JAVA_HOME and PATH must be configured, while variables that are not used can be ignored or commented out.`
+
+
+- Softlink the JDK to /usr/bin/java (still using JAVA_HOME=/opt/soft/java as an example)
+
+    ```shell
+    sudo ln -s /opt/soft/java/bin/java /usr/bin/java
+    ```
+
+ - Modify the configuration file `conf/config/install_config.conf` on **all** nodes, synchronizing the following configuration.
+    
+    * To add a new master node, you need to modify the ips and masters parameters.
+    * To add a new worker node, modify the ips and workers parameters.
+
+```shell
+# which machines to deploy DS services on, separated by commas between multiple physical machines
+ips="ds1,ds2,ds3,ds4"
+
+# ssh port,default 22
+sshPort="22"
+
+# which machine the master service is deployed on
+masters="existing master01,existing master02,ds1,ds2"
+
+# which machines the worker service is deployed on, and which worker group each worker belongs to; "default" in the following example is the group name
+workers="existing worker01:default,existing worker02:default,ds3:default,ds4:default"
+
+```
+- If the expansion is for worker nodes, you need to set the worker group. Please refer to [Worker grouping](./security.md) in the security documentation.
+
+- On all new nodes, change the directory permissions so that the deployment user has access to the dolphinscheduler directory
+
+```shell
+sudo chown -R dolphinscheduler:dolphinscheduler dolphinscheduler
+```
+
+### 1.5 Restart the cluster & verify
+
+- restart the cluster
+
+```shell
+# stop command:
+
+bin/stop-all.sh # stop all services
+
+sh bin/dolphinscheduler-daemon.sh stop master-server  # stop master service
+sh bin/dolphinscheduler-daemon.sh stop worker-server  # stop worker service
+sh bin/dolphinscheduler-daemon.sh stop logger-server  # stop logger service
+sh bin/dolphinscheduler-daemon.sh stop api-server     # stop api    service
+sh bin/dolphinscheduler-daemon.sh stop alert-server   # stop alert  service
+
+
+# start command:
+bin/start-all.sh # start all services
+
+sh bin/dolphinscheduler-daemon.sh start master-server  # start master service
+sh bin/dolphinscheduler-daemon.sh start worker-server  # start worker service
+sh bin/dolphinscheduler-daemon.sh start logger-server  # start logger service
+sh bin/dolphinscheduler-daemon.sh start api-server     # start api    service
+sh bin/dolphinscheduler-daemon.sh start alert-server   # start alert  service
+
+```
+
+```
+ Attention: When using stop-all.sh or start-all.sh, if the physical machine executing the command is not configured for password-free ssh to all machines, it will prompt for passwords
+```
+
+
+- After the script is completed, use the `jps` command to see if each node service is started (`jps` comes with the `Java JDK`)
+
+```
+    MasterServer         ----- master service
+    WorkerServer         ----- worker service
+    LoggerServer         ----- logger service
+    ApiApplicationServer ----- api    service
+    AlertServer          ----- alert  service
+```
+
+After successful startup, you can view the logs, which are stored in the logs folder.
+
+```Log Path
+ logs/
+    ├── dolphinscheduler-alert-server.log
+    ├── dolphinscheduler-master-server.log
+    ├── dolphinscheduler-worker-server.log
+    ├── dolphinscheduler-api-server.log
+    └── dolphinscheduler-logger-server.log
+```
+If the above services are started normally and the scheduling system page is normal, check whether there is an expanded Master or Worker service in the [Monitor] of the web system. If it exists, the expansion is complete.
+
+-----------------------------------------------------------------------------
+
+## 2. Reduction
+Reduction means removing master or worker services from an existing DolphinScheduler cluster.
+There are two steps for scaling down; after performing the following two steps, the scale-down operation is complete.
+
+### 2.1 Stop the service on the scaled-down node
+ * If you are scaling down the master node, identify the physical machine where the master service is located, and stop the master service on the physical machine.
+ * If the worker node is scaled down, determine the physical machine where the worker service is to be scaled down and stop the worker and logger services on the physical machine.
+ 
+```shell
+# stop command:
+bin/stop-all.sh # stop all services
+
+sh bin/dolphinscheduler-daemon.sh stop master-server  # stop master service
+sh bin/dolphinscheduler-daemon.sh stop worker-server  # stop worker service
+sh bin/dolphinscheduler-daemon.sh stop logger-server  # stop logger service
+sh bin/dolphinscheduler-daemon.sh stop api-server     # stop api    service
+sh bin/dolphinscheduler-daemon.sh stop alert-server   # stop alert  service
+
+
+# start command:
+bin/start-all.sh # start all services
+
+sh bin/dolphinscheduler-daemon.sh start master-server # start master service
+sh bin/dolphinscheduler-daemon.sh start worker-server # start worker service
+sh bin/dolphinscheduler-daemon.sh start logger-server # start logger service
+sh bin/dolphinscheduler-daemon.sh start api-server    # start api    service
+sh bin/dolphinscheduler-daemon.sh start alert-server  # start alert  service
+
+```
+
+```
+ Attention: When using stop-all.sh or start-all.sh, if the machine executing the command is not configured for password-free ssh to all machines, it will prompt for passwords.
+```
+
+- After the script is completed, use the `jps` command to see if each node service was successfully shut down (`jps` comes with the `Java JDK`)
+
+```
+    MasterServer         ----- master service
+    WorkerServer         ----- worker service
+    LoggerServer         ----- logger service
+    ApiApplicationServer ----- api    service
+    AlertServer          ----- alert  service
+```
+If the corresponding master service or worker service does not exist, then the master/worker service is successfully shut down.
+
+
+### 2.2 Modify the configuration file
+
+ - Modify the configuration file `conf/config/install_config.conf` on **all** nodes, synchronizing the following configuration.
+    
+    * To scale down the master node, modify the ips and masters parameters.
+    * To scale down worker nodes, modify the ips and workers parameters.
+
+```shell
+# which machines to deploy DS services on, "localhost" for this machine
+ips="ds1,ds2,ds3,ds4"
+
+# ssh port,default: 22
+sshPort="22"
+
+# which machine the master service is deployed on
+masters="existing master01,existing master02,ds1,ds2"
+
+# which machines the worker service is deployed on, and which worker group each worker belongs to; "default" in the following example is the group name
+workers="existing worker01:default,existing worker02:default,ds3:default,ds4:default"
+
+```
diff --git a/docs/2.0.9/docs/en/guide/flink-call.md b/docs/2.0.9/docs/en/guide/flink-call.md
new file mode 100644
index 0000000..2b86d7c
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/flink-call.md
@@ -0,0 +1,152 @@
+# Flink Call Operating Steps
+
+### Create a queue
+
+1. Log in to the scheduling system, click "Security", then click "Queue manage" on the left, and click "Create queue" to create a queue.
+2. Fill in the name and value of the queue, and click "Submit" 
+
+<p align="center">
+   <img src="/img/api/create_queue.png" width="80%" />
+ </p>
+
+
+
+
+### Create a tenant 
+
+```
+1. The tenant corresponds to a Linux user, which the worker uses to submit jobs. If this user does not exist in the Linux OS environment, the worker will create it when executing the script.
+2. Both the tenant and the tenant code are unique and cannot be repeated, just like a person has a name and id number.  
+3. After creating a tenant, there will be a folder in the HDFS relevant directory.  
+```
+
+<p align="center">
+   <img src="/img/api/create_tenant.png" width="80%" />
+ </p>
+
+
+
+
+### Create a user
+
+<p align="center">
+   <img src="/img/api/create_user.png" width="80%" />
+ </p>
+
+
+
+
+### Create a token
+
+1. Log in to the scheduling system, click "Security", then click "Token manage" on the left, and click "Create token" to create a token.
+
+<p align="center">
+   <img src="/img/token-management-en.png" width="80%" />
+ </p>
+
+
+2. Select the "Expiration time" (Token validity), select "User" (to perform the API operation with the specified user), click "Generate token", copy the Token string, and click "Submit"
+
+<p align="center">
+   <img src="/img/create-token-en1.png" width="80%" />
+ </p>
+
+
+### Use token
+
+1. Open the API documentation page
+
+   > Address: http://{api server ip}:12345/dolphinscheduler/doc.html?language=en_US&lang=en
+
+<p align="center">
+   <img src="/img/api-documentation-en.png" width="80%" />
+ </p>
+
+
+2. Select a test API, the API selected for this test: queryAllProjectList
+
+   > projects/query-project-list
+
+3. Open Postman, fill in the API address, enter the Token in the Headers, and then send the request to view the result (a curl sketch is shown after the screenshot below)
+
+   ```
+   token: The Token just generated
+   ```
+
+<p align="center">
+   <img src="/img/test-api.png" width="80%" />
+ </p>  
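+
+A curl equivalent of the Postman request above could look like this (a sketch; the host, port and token value are illustrative):
+
+```shell
+# call the queryAllProjectList API with the generated token in the request header
+curl -H "token: <THE-TOKEN-JUST-GENERATED>" \
+  "http://{api server ip}:12345/dolphinscheduler/projects/query-project-list"
+```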
+
+
+
+### User authorization
+
+<p align="center">
+   <img src="/img/api/user_authorization.png" width="80%" />
+ </p>
+
+
+
+
+### User login
+
+```
+http://192.168.1.163:12345/dolphinscheduler/ui/#/monitor/servers/master
+```
+
+<p align="center">
+   <img src="/img/api/user_login.png" width="80%" />
+ </p>
+
+
+
+
+### Upload the resource
+
+<p align="center">
+   <img src="/img/api/upload_resource.png" width="80%" />
+ </p>
+
+
+
+
+### Create a workflow
+
+<p align="center">
+   <img src="/img/api/create_workflow1.png" width="80%" />
+ </p>
+
+
+<p align="center">
+   <img src="/img/api/create_workflow2.png" width="80%" />
+ </p>
+
+
+<p align="center">
+   <img src="/img/api/create_workflow3.png" width="80%" />
+ </p>
+
+
+<p align="center">
+   <img src="/img/api/create_workflow4.png" width="80%" />
+ </p>
+
+
+
+
+### View the execution result
+
+<p align="center">
+   <img src="/img/api/execution_result.png" width="80%" />
+ </p>
+
+
+
+
+### View log
+
+<p align="center">
+   <img src="/img/api/log.png" width="80%" />
+ </p>
+
diff --git a/docs/2.0.9/docs/en/guide/homepage.md b/docs/2.0.9/docs/en/guide/homepage.md
new file mode 100644
index 0000000..285f7eb
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/homepage.md
@@ -0,0 +1,7 @@
+# Workflow Overview
+
+The home page contains task status statistics, process status statistics, and workflow definition statistics for all projects of the user.
+
+<p align="center">
+<img src="/img/home_en.png" width="80%" />
+</p>
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/installation/cluster.md b/docs/2.0.9/docs/en/guide/installation/cluster.md
new file mode 100644
index 0000000..be179f8
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/installation/cluster.md
@@ -0,0 +1,36 @@
+# Cluster Deployment
+
+Cluster deployment is to deploy the DolphinScheduler on multiple machines for running a large number of tasks in production.
+
+If you are new to DolphinScheduler and just want to experience it, we recommend the [Standalone](standalone.md) deployment. If you want to experience more complete functions or schedule a larger number of tasks, we recommend the [pseudo-cluster deployment](pseudo-cluster.md). If you want to use DolphinScheduler in production, we recommend the [cluster deployment](cluster.md) or [kubernetes](kubernetes.md) deployment.
+
+## Deployment Step
+
+Cluster deployment uses the same scripts and configuration files as [pseudo-cluster deployment](pseudo-cluster.md), so the preparation and requirements are the same. The difference is that pseudo-cluster deployment targets a single machine while cluster deployment targets multiple machines, and the "Modify configuration" step differs considerably between the two.
+
+### Prepare && DolphinScheduler startup environment
+
+Because cluster deployment targets multiple machines, you have to perform the "Prepare" and "startup environment" steps from [pseudo-cluster deployment](pseudo-cluster.md) on every machine, except for the sections "Configure machine SSH password-free login", "Start zookeeper" and "Initialize the database", which only need to be done on a single server.
+
+### Modify configuration
+
+This step is quite different from [pseudo-cluster deployment](pseudo-cluster.md), because the deployment script transfers the resources required for installation to each deployment machine using `scp`. We have to declare all machines on which we want to install DolphinScheduler and then run the script `install.sh`. The configuration file is under the path `conf/config/install_config.conf`; here we only need to modify the sections **INSTALL MACHINE** and **DolphinScheduler ENV, Database, Registry Server** and keep the others the same as in [pseudo-cluster deployment](pseudo-cluster.md). The following describes the parameters that must be modified:
+
+```shell
+# ---------------------------------------------------------
+# INSTALL MACHINE
+# ---------------------------------------------------------
+# Use IP or hostname for the servers that will run the master, worker and API server
+# If you use hostnames, make sure the machines can reach each other by hostname
+# In the example below, the hostnames of the machines deploying DolphinScheduler are ds1, ds2, ds3, ds4 and ds5; ds1 and ds2 install the master server, ds3, ds4 and ds5 install the worker server, the alert server is installed on ds4, and the api server is installed on ds5
+ips="ds1,ds2,ds3,ds4,ds5"
+masters="ds1,ds2"
+workers="ds3:default,ds4:default,ds5:default"
+alertServer="ds4"
+apiServers="ds5"
+pythonGatewayServers="ds5"
+```
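+
+After `install_config.conf` has been synchronized, the installation is triggered from the machine that holds the unpacked binary package by running the deployment script mentioned above as the deployment user (a sketch):
+
+```shell
+# run from the root directory of the unpacked binary package
+sh install.sh
+```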
+
+## Start DolphinScheduler && Login DolphinScheduler && Server Start And Stop
+
+Same as [pseudo-cluster deployment](pseudo-cluster.md)
diff --git a/docs/2.0.9/docs/en/guide/installation/docker.md b/docs/2.0.9/docs/en/guide/installation/docker.md
new file mode 100644
index 0000000..e8b1306
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/installation/docker.md
@@ -0,0 +1,1043 @@
+# QuickStart in Docker
+
+## Prerequisites
+
+ - [Docker](https://docs.docker.com/engine/install/) 1.13.1+
+ - [Docker Compose](https://docs.docker.com/compose/) 1.11.0+
+
+## How to use this Docker image
+
+Here are 3 ways to quickly install DolphinScheduler:
+
+### The First Way: Start a DolphinScheduler by docker-compose (recommended)
+
+In this way, you need to install [docker-compose](https://docs.docker.com/compose/) as a prerequisite, please install it yourself according to the rich docker-compose installation guidance on the Internet
+
+For Windows 7-10, you can install [Docker Toolbox](https://github.com/docker/toolbox/releases). For Windows 10 64-bit, you can install [Docker Desktop](https://docs.docker.com/docker-for-windows/install/), and pay attention to the [system requirements](https://docs.docker.com/docker-for-windows/install/#system-requirements)
+
+#### 0. Configure memory not less than 4GB
+
+For Mac user, click `Docker Desktop -> Preferences -> Resources -> Memory`
+
+For Windows Docker Toolbox user, two items need to be configured:
+
+ - **Memory**: Open Oracle VirtualBox Manager, if you double-click Docker Quickstart Terminal and successfully run Docker Toolbox, you will see a Virtual Machine named `default`. And click `Settings -> System -> Motherboard -> Base Memory`
+ - **Port Forwarding**: Click `Settings -> Network -> Advanced -> Port forwarding -> Add`. `Name`, `Host Port` and `Guest Port` all fill in `12345`, regardless of `Host IP` and `Guest IP`
+
+For Windows Docker Desktop user
+ - **Hyper-V mode**: Click `Docker Desktop -> Settings -> Resources -> Memory`
+ - **WSL 2 mode**: Refer to [WSL 2 utility VM](https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-global-options-with-wslconfig)
+
+#### 1. Download the Source Code Package
+
+Please download the source code package apache-dolphinscheduler-2.0.9-src.tar.gz, download address: [download](/en-us/download/download.html)
+
+#### 2. Pull Image and Start the Service
+
+> For Mac and Linux user, open **Terminal**
+> For Windows Docker Toolbox user, open **Docker Quickstart Terminal**
+> For Windows Docker Desktop user, open **Windows PowerShell**
+
+```
+$ tar -zxvf apache-dolphinscheduler-2.0.9-src.tar.gz
+$ cd apache-dolphinscheduler-2.0.9-src/docker/docker-swarm
+$ docker pull dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+$ docker tag dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9 apache/dolphinscheduler:2.0.9
+$ docker tag apache/dolphinscheduler:2.0.9 apache/dolphinscheduler:latest
+$ docker-compose up -d
+```
+
+> PowerShell should use `cd apache-dolphinscheduler-2.0.9-src\docker\docker-swarm`
+
+The **PostgreSQL** (with username `root`, password `root` and database `dolphinscheduler`) and **ZooKeeper** services will start by default
+
+#### 3. Login
+
+Visit the Web UI: http://localhost:12345/dolphinscheduler
+
+The default username is `admin` and the default password is `dolphinscheduler123`
+
+<p align="center">
+  <img src="/img/login_en.png" width="60%" />
+</p>
+
+Please refer to the `Quick Start` in the chapter [Quick Start](../quick-start.md) to explore how to use DolphinScheduler
+
+### The Second Way: Start via specifying the existing PostgreSQL and ZooKeeper service
+
+In this way, you need to install [docker](https://docs.docker.com/engine/install/) as a prerequisite, please install it yourself according to the rich docker installation guidance on the Internet
+
+#### 1. Basic Required Software (please install by yourself)
+
+ - [PostgreSQL](https://www.postgresql.org/download/) (8.2.15+)
+ - [ZooKeeper](https://zookeeper.apache.org/releases.html) (3.4.6+)
+ - [Docker](https://docs.docker.com/engine/install/) (1.13.1+)
+
+#### 2. Please login to the PostgreSQL database and create a database named `dolphinscheduler`
+
+#### 3. Initialize the database, import `sql/dolphinscheduler_postgre.sql` to create tables and initial data
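+
+With the `psql` client, steps 2 and 3 could look like this (a sketch; the host and credentials are illustrative, run from the directory that contains `sql/dolphinscheduler_postgre.sql`):
+
+```shell
+# create the metadata database and import the initialization script
+psql -h 192.168.x.x -U root -c "CREATE DATABASE dolphinscheduler;"
+psql -h 192.168.x.x -U root -d dolphinscheduler -f sql/dolphinscheduler_postgre.sql
+```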
+
+#### 4. Download the DolphinScheduler Image
+
+We have already uploaded user-oriented DolphinScheduler image to the Docker repository so that you can pull the image from the docker repository:
+
+```
+docker pull dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+```
+
+#### 5. Run a DolphinScheduler Instance
+
+```
+$ docker run -d --name dolphinscheduler \
+-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
+-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
+-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
+-p 12345:12345 \
+apache/dolphinscheduler:2.0.9 all
+```
+
+Note: the database username test and password test need to be replaced with your actual PostgreSQL username and password, and 192.168.x.x needs to be replaced with the host IP of your PostgreSQL and ZooKeeper services
+
+#### 6. Login
+
+Same as above
+
+### The Third Way: Start a standalone DolphinScheduler server
+
+The following services are automatically started when the container starts:
+
+```
+     MasterServer         ----- master service
+     WorkerServer         ----- worker service
+     LoggerServer         ----- logger service
+     ApiApplicationServer ----- api service
+     AlertServer          ----- alert service
+     PythonGatewayServer  ----- python gateway service
+```
+
+If you just want to run part of the services of DolphinScheduler, you can start the individual services by running the following commands.
+
+* Start a **master server**, For example:
+
+```
+$ docker run -d --name dolphinscheduler-master \
+-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
+-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
+-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
+apache/dolphinscheduler:2.0.9 master-server
+```
+
+* Start a **worker server** (including **logger server**), For example:
+
+```
+$ docker run -d --name dolphinscheduler-worker \
+-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
+-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
+-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
+apache/dolphinscheduler:2.0.9 worker-server
+```
+
+* Start an **api server**, For example:
+
+```
+$ docker run -d --name dolphinscheduler-api \
+-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
+-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
+-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
+-p 12345:12345 \
+apache/dolphinscheduler:2.0.9 api-server
+```
+
+* Start an **alert server**, For example:
+
+```
+$ docker run -d --name dolphinscheduler-alert \
+-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
+-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
+apache/dolphinscheduler:2.0.9 alert-server
+```
+
+* Start a **python gateway server**, For example:
+
+```
+$ docker run -d --name dolphinscheduler-python-gateway \
+-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
+-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
+apache/dolphinscheduler:2.0.9 python-gateway
+```
+
+**Note**: You must specify `DATABASE_HOST`, `DATABASE_PORT`, `DATABASE_DATABASE`, `DATABASE_USERNAME`, `DATABASE_PASSWORD`, `ZOOKEEPER_QUORUM` when starting a standalone dolphinscheduler server.
+
+## Environment Variables
+
+The Docker container is configured through environment variables, and the [Appendix-Environment Variables](#appendix-environment-variables) lists the configurable environment variables of the DolphinScheduler and their default values
+
+In particular, they can be configured through the environment variable configuration file `config.env.sh` in Docker Compose and Docker Swarm (see the sketch below).
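+
+For example, after editing `config.env.sh`, the containers can be recreated so that the new environment takes effect (a sketch):
+
+```shell
+# recreate the containers with the updated environment file
+docker-compose up -d --force-recreate
+```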
+
+## Support Matrix
+
+| Type                                                         | Support      | Notes                                 |
+| ------------------------------------------------------------ | ------------ | ------------------------------------- |
+| Shell                                                        | Yes          |                                       |
+| Python2                                                      | Yes          |                                       |
+| Python3                                                      | Indirect Yes | Refer to FAQ                          |
+| Hadoop2                                                      | Indirect Yes | Refer to FAQ                          |
+| Hadoop3                                                      | Not Sure     | Not tested                            |
+| Spark-Local(client)                                          | Indirect Yes | Refer to FAQ                          |
+| Spark-YARN(cluster)                                          | Indirect Yes | Refer to FAQ                          |
+| Spark-Standalone(cluster)                                    | Not Yet      |                                       |
+| Spark-Kubernetes(cluster)                                    | Not Yet      |                                       |
+| Flink-Local(local>=1.11)                                     | Not Yet      | Generic CLI mode is not yet supported |
+| Flink-YARN(yarn-cluster)                                     | Indirect Yes | Refer to FAQ                          |
+| Flink-YARN(yarn-session/yarn-per-job/yarn-application>=1.11) | Not Yet      | Generic CLI mode is not yet supported |
+| Flink-Standalone(default)                                    | Not Yet      |                                       |
+| Flink-Standalone(remote>=1.11)                               | Not Yet      | Generic CLI mode is not yet supported |
+| Flink-Kubernetes(default)                                    | Not Yet      |                                       |
+| Flink-Kubernetes(remote>=1.11)                               | Not Yet      | Generic CLI mode is not yet supported |
+| Flink-NativeKubernetes(kubernetes-session/application>=1.11) | Not Yet      | Generic CLI mode is not yet supported |
+| MapReduce                                                    | Indirect Yes | Refer to FAQ                          |
+| Kerberos                                                     | Indirect Yes | Refer to FAQ                          |
+| HTTP                                                         | Yes          |                                       |
+| DataX                                                        | Indirect Yes | Refer to FAQ                          |
+| Sqoop                                                        | Indirect Yes | Refer to FAQ                          |
+| SQL-MySQL                                                    | Indirect Yes | Refer to FAQ                          |
+| SQL-PostgreSQL                                               | Yes          |                                       |
+| SQL-Hive                                                     | Indirect Yes | Refer to FAQ                          |
+| SQL-Spark                                                    | Indirect Yes | Refer to FAQ                          |
+| SQL-ClickHouse                                               | Indirect Yes | Refer to FAQ                          |
+| SQL-Oracle                                                   | Indirect Yes | Refer to FAQ                          |
+| SQL-SQLServer                                                | Indirect Yes | Refer to FAQ                          |
+| SQL-DB2                                                      | Indirect Yes | Refer to FAQ                          |
+
+## FAQ
+
+### How to manage DolphinScheduler by docker-compose?
+
+Start, restart, stop or list containers:
+
+```
+docker-compose start
+docker-compose restart
+docker-compose stop
+docker-compose ps
+```
+
+Stop containers and remove containers, networks:
+
+```
+docker-compose down
+```
+
+Stop containers and remove containers, networks and volumes:
+
+```
+docker-compose down -v
+```
+
+### How to view the logs of a container?
+
+List all running containers:
+
+```
+docker ps
+docker ps --format "{{.Names}}" # only print names
+```
+
+View the logs of a container named docker-swarm_dolphinscheduler-api_1:
+
+```
+docker logs docker-swarm_dolphinscheduler-api_1
+docker logs -f docker-swarm_dolphinscheduler-api_1 # follow log output
+docker logs --tail 10 docker-swarm_dolphinscheduler-api_1 # show last 10 lines from the end of the logs
+```
+
+### How to scale master and worker by docker-compose?
+
+Scale master to 2 instances:
+
+```
+docker-compose up -d --scale dolphinscheduler-master=2 dolphinscheduler-master
+```
+
+Scale worker to 3 instances:
+
+```
+docker-compose up -d --scale dolphinscheduler-worker=3 dolphinscheduler-worker
+```
+
+### How to deploy DolphinScheduler on Docker Swarm?
+
+Assuming that the Docker Swarm cluster has been created (If there is no Docker Swarm cluster, please refer to [create-swarm](https://docs.docker.com/engine/swarm/swarm-tutorial/create-swarm/))
+
+Start a stack named dolphinscheduler:
+
+```
+docker stack deploy -c docker-stack.yml dolphinscheduler
+```
+
+List the services in the stack named dolphinscheduler:
+
+```
+docker stack services dolphinscheduler
+```
+
+Stop and remove the stack named dolphinscheduler:
+
+```
+docker stack rm dolphinscheduler
+```
+
+Remove the volumes of the stack named dolphinscheduler:
+
+```
+docker volume rm -f $(docker volume ls --format "{{.Name}}" | grep -e "^dolphinscheduler")
+```
+
+### How to scale master and worker on Docker Swarm?
+
+Scale master of the stack named dolphinscheduler to 2 instances:
+
+```
+docker service scale dolphinscheduler_dolphinscheduler-master=2
+```
+
+Scale worker of the stack named dolphinscheduler to 3 instances:
+
+```
+docker service scale dolphinscheduler_dolphinscheduler-worker=3
+```
+
+### How to build a Docker image?
+
+#### Build from the source code (Requires Maven 3.3+ & JDK 1.8+)
+
+In Unix-Like, execute in Terminal:
+
+```bash
+$ bash ./docker/build/hooks/build
+```
+
+In Windows, execute in cmd or PowerShell:
+
+```bat
+C:\dolphinscheduler-src>.\docker\build\hooks\build.bat
+```
+
+Please read the `./docker/build/hooks/build` and `./docker/build/hooks/build.bat` script files if you don't understand them
+
+#### Build from the binary distribution (Does not require Maven 3.3+ & JDK 1.8+)
+
+Please download the binary distribution package apache-dolphinscheduler-2.0.9-bin.tar.gz, download address: [download](/en-us/download/download.html). Then put apache-dolphinscheduler-2.0.9-bin.tar.gz into the `apache-dolphinscheduler-2.0.9-src/docker/build` directory and execute in Terminal or PowerShell:
+
+```
+$ cd apache-dolphinscheduler-2.0.9-src/docker/build
+$ docker build --build-arg VERSION=2.0.9 -t apache/dolphinscheduler:2.0.9 .
+```
+
+> PowerShell should use `cd apache-dolphinscheduler-2.0.9-src/docker/build`
+
+#### Build multi-platform images
+
+Currently, building images for the `linux/amd64` and `linux/arm64` platform architectures is supported. Requirements:
+
+1. Support [docker buildx](https://docs.docker.com/engine/reference/commandline/buildx/)
+2. Own the push permission of https://hub.docker.com/r/apache/dolphinscheduler (**Be cautious**: The build command will automatically push the multi-platform architecture images to the docker hub of apache/dolphinscheduler by default)
+
+Execute:
+
+```bash
+$ docker login # login to push apache/dolphinscheduler
+$ bash ./docker/build/hooks/build
+```
+
+### How to add an environment variable for Docker?
+
+If you would like to do additional initialization in an image derived from this one, add one or more environment variables under `/root/start-init-conf.sh`, and modify template files in `/opt/dolphinscheduler/conf/*.tpl`.
+
+For example, to add an environment variable `SECURITY_AUTHENTICATION_TYPE` in `/root/start-init-conf.sh`:
+
+```
+export SECURITY_AUTHENTICATION_TYPE=PASSWORD
+```
+
+and to modify `application-api.properties.tpl` template file, add the `SECURITY_AUTHENTICATION_TYPE`:
+```
+security.authentication.type=${SECURITY_AUTHENTICATION_TYPE}
+```
+
+`/root/start-init-conf.sh` will dynamically generate config file:
+
+```sh
+echo "generate dolphinscheduler config"
+ls ${DOLPHINSCHEDULER_HOME}/conf/ | grep ".tpl" | while read line; do
+eval "cat << EOF
+$(cat ${DOLPHINSCHEDULER_HOME}/conf/${line})
+EOF
+" > ${DOLPHINSCHEDULER_HOME}/conf/${line%.*}
+done
+```
+
+### How to use MySQL as the DolphinScheduler's database instead of PostgreSQL?
+
+> Because of the commercial license, we cannot directly use the driver of MySQL.
+>
+> If you want to use MySQL, you can build a new image based on the `apache/dolphinscheduler` image as follows.
+
+1. Download the MySQL driver [mysql-connector-java-8.0.16.jar](https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar)
+
+2. Create a new `Dockerfile` to add MySQL driver:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
+```
+
+3. Build a new docker image including MySQL driver:
+
+```
+docker build -t apache/dolphinscheduler:mysql-driver .
+```
+
+4. Modify all `image` fields to `apache/dolphinscheduler:mysql-driver` in `docker-compose.yml`
+
+> If you want to deploy dolphinscheduler on Docker Swarm, you need to modify `docker-stack.yml`
+
+5. Comment the `dolphinscheduler-postgresql` block in `docker-compose.yml`
+
+6. Add `dolphinscheduler-mysql` service in `docker-compose.yml` (**Optional**, you can directly use an external MySQL database)
+
+7. Modify DATABASE environment variables in `config.env.sh`
+
+```
+DATABASE_TYPE=mysql
+DATABASE_DRIVER=com.mysql.jdbc.Driver
+DATABASE_HOST=dolphinscheduler-mysql
+DATABASE_PORT=3306
+DATABASE_USERNAME=root
+DATABASE_PASSWORD=root
+DATABASE_DATABASE=dolphinscheduler
+DATABASE_PARAMS=useUnicode=true&characterEncoding=UTF-8
+```
+
+> If you have added `dolphinscheduler-mysql` service in `docker-compose.yml`, just set `DATABASE_HOST` to `dolphinscheduler-mysql`
+
+8. Run a dolphinscheduler (See **How to use this docker image**)
+
+### How to support MySQL datasource in `Datasource manage`?
+
+> Because of the commercial license, we cannot directly use the driver of MySQL.
+>
+> If you want to add MySQL datasource, you can build a new image based on the `apache/dolphinscheduler` image as follows.
+
+1. Download the MySQL driver [mysql-connector-java-8.0.16.jar](https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar)
+
+2. Create a new `Dockerfile` to add MySQL driver:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
+```
+
+3. Build a new docker image including MySQL driver:
+
+```
+docker build -t apache/dolphinscheduler:mysql-driver .
+```
+
+4. Modify all `image` fields to `apache/dolphinscheduler:mysql-driver` in `docker-compose.yml`
+
+> If you want to deploy dolphinscheduler on Docker Swarm, you need to modify `docker-stack.yml`
+
+5. Run a dolphinscheduler (See **How to use this docker image**)
+
+6. Add a MySQL datasource in `Datasource manage`
+
+### How to support Oracle datasource in `Datasource manage`?
+
+> Because of the commercial license, we cannot directly use the driver of Oracle.
+>
+> If you want to add Oracle datasource, you can build a new image based on the `apache/dolphinscheduler` image as follows.
+
+1. Download the Oracle driver [ojdbc8.jar](https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc8/) (such as `ojdbc8-19.9.0.0.jar`)
+
+2. Create a new `Dockerfile` to add Oracle driver:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+COPY ojdbc8-19.9.0.0.jar /opt/dolphinscheduler/lib
+```
+
+3. Build a new docker image including Oracle driver:
+
+```
+docker build -t apache/dolphinscheduler:oracle-driver .
+```
+
+4. Modify all `image` fields to `apache/dolphinscheduler:oracle-driver` in `docker-compose.yml`
+
+> If you want to deploy dolphinscheduler on Docker Swarm, you need to modify `docker-stack.yml`
+
+5. Run a dolphinscheduler (See **How to use this docker image**)
+
+6. Add an Oracle datasource in `Datasource manage`
+
+### How to support Python 2 pip and custom requirements.txt?
+
+1. Create a new `Dockerfile` to install pip:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+COPY requirements.txt /tmp
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends python-pip && \
+    pip install --no-cache-dir -r /tmp/requirements.txt && \
+    rm -rf /var/lib/apt/lists/*
+```
+
+The command will install the default **pip 18.1**. If you want to upgrade pip, just add one line:
+
+```
+    pip install --no-cache-dir -U pip && \
+```
+
+2. Build a new docker image including pip:
+
+```
+docker build -t apache/dolphinscheduler:pip .
+```
+
+3. Modify all `image` fields to `apache/dolphinscheduler:pip` in `docker-compose.yml`
+
+> If you want to deploy dolphinscheduler on Docker Swarm, you need to modify `docker-stack.yml`
+
+4. Run a dolphinscheduler (See **How to use this docker image**)
+
+5. Verify pip under a new Python task
+
+### How to support Python 3?
+
+1. Create a new `Dockerfile` to install Python 3:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends python3 && \
+    rm -rf /var/lib/apt/lists/*
+```
+
+The command will install the default **Python 3.7.3**. If you also want to install **pip3**, just replace `python3` with `python3-pip` like
+
+```
+    apt-get install -y --no-install-recommends python3-pip && \
+```
+
+2. Build a new docker image including Python 3:
+
+```
+docker build -t apache/dolphinscheduler:python3 .
+```
+
+3. Modify all `image` fields to `apache/dolphinscheduler:python3` in `docker-compose.yml`
+
+> If you want to deploy dolphinscheduler on Docker Swarm, you need to modify `docker-stack.yml`
+
+4. Modify `PYTHON_HOME` to `/usr/bin/python3` in `config.env.sh`
+
+5. Run a dolphinscheduler (See **How to use this docker image**)
+
+6. Verify Python 3 under a new Python task
+
+### How to support Hadoop, Spark, Flink, Hive or DataX?
+
+Take Spark 2.4.7 as an example:
+
+1. Download the Spark 2.4.7 release binary `spark-2.4.7-bin-hadoop2.7.tgz`
+
+2. Run a dolphinscheduler (See **How to use this docker image**)
+
+3. Copy the Spark 2.4.7 release binary into Docker container
+
+```bash
+docker cp spark-2.4.7-bin-hadoop2.7.tgz docker-swarm_dolphinscheduler-worker_1:/opt/soft
+```
+
+Because the volume `dolphinscheduler-shared-local` is mounted on `/opt/soft`, all files in `/opt/soft` will not be lost
+
+4. Attach the container and ensure that `SPARK_HOME2` exists
+
+```bash
+docker exec -it docker-swarm_dolphinscheduler-worker_1 bash
+cd /opt/soft
+tar zxf spark-2.4.7-bin-hadoop2.7.tgz
+rm -f spark-2.4.7-bin-hadoop2.7.tgz
+ln -s spark-2.4.7-bin-hadoop2.7 spark2 # or just mv
+$SPARK_HOME2/bin/spark-submit --version
+```
+
+The last command will print the Spark version if everything goes well
+
+5. Verify Spark under a Shell task
+
+```
+$SPARK_HOME2/bin/spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME2/examples/jars/spark-examples_2.11-2.4.7.jar
+```
+
+Check whether the task log contains the output like `Pi is roughly 3.146015`
+
+6. Verify Spark under a Spark task
+
+The file `spark-examples_2.11-2.4.7.jar` needs to be uploaded to the resources first, and then create a Spark task with:
+
+- Spark Version: `SPARK2`
+- Main Class: `org.apache.spark.examples.SparkPi`
+- Main Package: `spark-examples_2.11-2.4.7.jar`
+- Deploy Mode: `local`
+
+Similarly, check whether the task log contains the output like `Pi is roughly 3.146015`
+
+7. Verify Spark on YARN
+
+Spark on YARN (Deploy Mode is `cluster` or `client`) requires Hadoop support. Similar to Spark support, the operation of supporting Hadoop is almost the same as the previous steps
+
+Ensure that `$HADOOP_HOME` and `$HADOOP_CONF_DIR` exist (a quick check is sketched below)
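+
+A quick sanity check inside the worker container (the container name follows the docker-compose naming used above):
+
+```shell
+# print the Hadoop home and list the configuration directory inside the worker container
+docker exec -it docker-swarm_dolphinscheduler-worker_1 bash -c 'echo "$HADOOP_HOME"; ls "$HADOOP_CONF_DIR"'
+```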
+
+### How to support Spark 3?
+
+In fact, the way to submit applications with `spark-submit` is the same, regardless of Spark 1, 2 or 3. In other words, the semantics of `SPARK_HOME2` is the second `SPARK_HOME` instead of `SPARK2`'s `HOME`, so just set `SPARK_HOME2=/path/to/spark3`
+
+Take Spark 3.1.1 as an example:
+
+1. Download the Spark 3.1.1 release binary `spark-3.1.1-bin-hadoop2.7.tgz`
+
+2. Run a dolphinscheduler (See **How to use this docker image**)
+
+3. Copy the Spark 3.1.1 release binary into Docker container
+
+```bash
+docker cp spark-3.1.1-bin-hadoop2.7.tgz docker-swarm_dolphinscheduler-worker_1:/opt/soft
+```
+
+4. Attach the container and ensure that `SPARK_HOME2` exists
+
+```bash
+docker exec -it docker-swarm_dolphinscheduler-worker_1 bash
+cd /opt/soft
+tar zxf spark-3.1.1-bin-hadoop2.7.tgz
+rm -f spark-3.1.1-bin-hadoop2.7.tgz
+ln -s spark-3.1.1-bin-hadoop2.7 spark2 # or just mv
+$SPARK_HOME2/bin/spark-submit --version
+```
+
+The last command will print the Spark version if everything goes well
+
+5. Verify Spark under a Shell task
+
+```
+$SPARK_HOME2/bin/spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME2/examples/jars/spark-examples_2.12-3.1.1.jar
+```
+
+Check whether the task log contains the output like `Pi is roughly 3.146015`
+
+### How to support shared storage between Master, Worker and Api server?
+
+> **Note**: If it is deployed on a single machine by `docker-compose`, step 1 and 2 can be skipped directly, and execute the command like `docker cp hadoop-3.2.2.tar.gz docker-swarm_dolphinscheduler-worker_1:/opt/soft` to put Hadoop into the shared directory `/opt/soft` in the container
+
+For example, Master, Worker and Api server may use Hadoop at the same time
+
+1. Modify the volume `dolphinscheduler-shared-local` to support NFS in `docker-compose.yml`
+
+> If you want to deploy dolphinscheduler on Docker Swarm, you need to modify `docker-stack.yml`
+
+```yaml
+volumes:
+  dolphinscheduler-shared-local:
+    driver_opts:
+      type: "nfs"
+      o: "addr=10.40.0.199,nolock,soft,rw"
+      device: ":/path/to/shared/dir"
+```
+
+2. Put the Hadoop into the NFS
+
+3. Ensure that `$HADOOP_HOME` and `$HADOOP_CONF_DIR` are correct
+
+### How to support local file resource storage instead of HDFS and S3?
+
+> **Note**: If it is deployed on a single machine by `docker-compose`, step 2 can be skipped directly
+
+1. Modify the following environment variables in `config.env.sh`:
+
+```
+RESOURCE_STORAGE_TYPE=HDFS
+FS_DEFAULT_FS=file:///
+```
+
+2. Modify the volume `dolphinscheduler-resource-local` to support NFS in `docker-compose.yml`
+
+> If you want to deploy dolphinscheduler on Docker Swarm, you need to modify `docker-stack.yml`
+
+```yaml
+volumes:
+  dolphinscheduler-resource-local:
+    driver_opts:
+      type: "nfs"
+      o: "addr=10.40.0.199,nolock,soft,rw"
+      device: ":/path/to/resource/dir"
+```
+
+### How to support S3 resource storage like MinIO?
+
+Take MinIO as an example: Modify the following environment variables in `config.env.sh`
+
+```
+RESOURCE_STORAGE_TYPE=S3
+RESOURCE_UPLOAD_PATH=/dolphinscheduler
+FS_DEFAULT_FS=s3a://BUCKET_NAME
+FS_S3A_ENDPOINT=http://MINIO_IP:9000
+FS_S3A_ACCESS_KEY=MINIO_ACCESS_KEY
+FS_S3A_SECRET_KEY=MINIO_SECRET_KEY
+```
+
+`BUCKET_NAME`, `MINIO_IP`, `MINIO_ACCESS_KEY` and `MINIO_SECRET_KEY` need to be modified to actual values
+
+> **Note**: `MINIO_IP` can only be an IP address, not a domain name, because DolphinScheduler currently doesn't support S3 path-style access
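+
+The bucket referenced by `FS_DEFAULT_FS` typically needs to exist in MinIO before resources can be uploaded. A minimal sketch using the MinIO client `mc` (assuming `mc` is installed locally; the placeholders are the same as above):
+
+```bash
+# Sketch: create the bucket referenced by FS_DEFAULT_FS with the MinIO client
+mc alias set minio http://MINIO_IP:9000 MINIO_ACCESS_KEY MINIO_SECRET_KEY
+mc mb minio/BUCKET_NAME
+mc ls minio        # the new bucket should be listed
+```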
+
+### How to configure SkyWalking?
+
+Modify SkyWalking environment variables in `config.env.sh`:
+
+```
+SKYWALKING_ENABLE=true
+SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800
+SW_GRPC_LOG_SERVER_HOST=127.0.0.1
+SW_GRPC_LOG_SERVER_PORT=11800
+```
+
+## Appendix-Environment Variables
+
+### Database
+
+**`DATABASE_TYPE`**
+
+This environment variable sets the type for the database. The default value is `postgresql`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server` or `alert-server`.
+
+**`DATABASE_DRIVER`**
+
+This environment variable sets the driver for the database. The default value is `org.postgresql.Driver`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server` or `alert-server`.
+
+**`DATABASE_HOST`**
+
+This environment variable sets the host for the database. The default value is `127.0.0.1`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server` or `alert-server`.
+
+**`DATABASE_PORT`**
+
+This environment variable sets the port for the database. The default value is `5432`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server` or `alert-server`.
+
+**`DATABASE_USERNAME`**
+
+This environment variable sets the username for the database. The default value is `root`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server` or `alert-server`.
+
+**`DATABASE_PASSWORD`**
+
+This environment variable sets the password for the database. The default value is `root`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server` or `alert-server`.
+
+**`DATABASE_DATABASE`**
+
+This environment variable sets the database name. The default value is `dolphinscheduler`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server` or `alert-server`.
+
+**`DATABASE_PARAMS`**
+
+This environment variable sets the connection parameters for the database. The default value is `characterEncoding=utf8`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server`, `api-server` or `alert-server`.
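+
+A minimal sketch of how these variables might be combined in `config.env.sh` when pointing DolphinScheduler at an external PostgreSQL; the host, credentials and database name below are illustrative placeholders:
+
+```bash
+# Sketch: external PostgreSQL settings in config.env.sh (placeholders, adjust to your environment)
+DATABASE_TYPE=postgresql
+DATABASE_DRIVER=org.postgresql.Driver
+DATABASE_HOST=192.168.1.10
+DATABASE_PORT=5432
+DATABASE_USERNAME=dolphinscheduler
+DATABASE_PASSWORD=dolphinscheduler
+DATABASE_DATABASE=dolphinscheduler
+DATABASE_PARAMS=characterEncoding=utf8
+```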
+
+### ZooKeeper
+
+**`ZOOKEEPER_QUORUM`**
+
+This environment variable sets zookeeper quorum. The default value is `127.0.0.1:2181`.
+
+**Note**: You must specify it when starting a standalone dolphinscheduler server, such as `master-server`, `worker-server` or `api-server`.
+
+**`ZOOKEEPER_ROOT`**
+
+This environment variable sets zookeeper root directory for dolphinscheduler. The default value is `/dolphinscheduler`.
+
+### Common
+
+**`DOLPHINSCHEDULER_OPTS`**
+
+This environment variable sets JVM options for dolphinscheduler, suitable for `master-server`, `worker-server`, `api-server`, `alert-server`, `logger-server`. The default value is empty.
+
+**`DATA_BASEDIR_PATH`**
+
+This environment variable sets the user data directory path. Configure it yourself and make sure the directory exists and has read-write permissions. The default value is `/tmp/dolphinscheduler`.
+
+**`RESOURCE_STORAGE_TYPE`**
+
+This environment variable sets the resource storage type for dolphinscheduler, one of `HDFS`, `S3` or `NONE`. The default value is `HDFS`.
+
+**`RESOURCE_UPLOAD_PATH`**
+
+This environment variable sets resource store path on HDFS/S3 for resource storage. The default value is `/dolphinscheduler`.
+
+**`FS_DEFAULT_FS`**
+
+This environment variable sets fs.defaultFS for resource storage like `file:///`, `hdfs://mycluster:8020` or `s3a://dolphinscheduler`. The default value is `file:///`.
+
+**`FS_S3A_ENDPOINT`**
+
+This environment variable sets s3 endpoint for resource storage. The default value is `s3.xxx.amazonaws.com`.
+
+**`FS_S3A_ACCESS_KEY`**
+
+This environment variable sets s3 access key for resource storage. The default value is `xxxxxxx`.
+
+**`FS_S3A_SECRET_KEY`**
+
+This environment variable sets s3 secret key for resource storage. The default value is `xxxxxxx`.
+
+**`HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE`**
+
+This environment variable sets whether to start up Kerberos. The default value is `false`.
+
+**`JAVA_SECURITY_KRB5_CONF_PATH`**
+
+This environment variable sets java.security.krb5.conf path. The default value is `/opt/krb5.conf`.
+
+**`LOGIN_USER_KEYTAB_USERNAME`**
+
+This environment variable sets the login user's keytab username. The default value is `hdfs@HADOOP.COM`.
+
+**`LOGIN_USER_KEYTAB_PATH`**
+
+This environment variable sets the login user's keytab path. The default value is `/opt/hdfs.keytab`.
+
+**`KERBEROS_EXPIRE_TIME`**
+
+This environment variable sets Kerberos expire time, the unit is hour. The default value is `2`.
+
+**`HDFS_ROOT_USER`**
+
+This environment variable sets HDFS root user when resource.storage.type=HDFS. The default value is `hdfs`.
+
+**`RESOURCE_MANAGER_HTTPADDRESS_PORT`**
+
+This environment variable sets resource manager HTTP address port. The default value is `8088`.
+
+**`YARN_RESOURCEMANAGER_HA_RM_IDS`**
+
+This environment variable sets yarn resourcemanager ha rm ids. The default value is empty.
+
+**`YARN_APPLICATION_STATUS_ADDRESS`**
+
+This environment variable sets yarn application status address. The default value is `http://ds1:%s/ws/v1/cluster/apps/%s`.
+
+**`SKYWALKING_ENABLE`**
+
+This environment variable sets whether to enable SkyWalking. The default value is `false`.
+
+**`SW_AGENT_COLLECTOR_BACKEND_SERVICES`**
+
+This environment variable sets agent collector backend services for SkyWalking. The default value is `127.0.0.1:11800`.
+
+**`SW_GRPC_LOG_SERVER_HOST`**
+
+This environment variable sets gRPC log server host for SkyWalking. The default value is `127.0.0.1`.
+
+**`SW_GRPC_LOG_SERVER_PORT`**
+
+This environment variable sets gRPC log server port for SkyWalking. The default value is `11800`.
+
+**`HADOOP_HOME`**
+
+This environment variable sets `HADOOP_HOME`. The default value is `/opt/soft/hadoop`.
+
+**`HADOOP_CONF_DIR`**
+
+This environment variable sets `HADOOP_CONF_DIR`. The default value is `/opt/soft/hadoop/etc/hadoop`.
+
+**`SPARK_HOME1`**
+
+This environment variable sets `SPARK_HOME1`. The default value is `/opt/soft/spark1`.
+
+**`SPARK_HOME2`**
+
+This environment variable sets `SPARK_HOME2`. The default value is `/opt/soft/spark2`.
+
+**`PYTHON_HOME`**
+
+This environment variable sets `PYTHON_HOME`. The default value is `/usr/bin/python`.
+
+**`JAVA_HOME`**
+
+This environment variable sets `JAVA_HOME`. The default value is `/usr/local/openjdk-8`.
+
+**`HIVE_HOME`**
+
+This environment variable sets `HIVE_HOME`. The default value is `/opt/soft/hive`.
+
+**`FLINK_HOME`**
+
+This environment variable sets `FLINK_HOME`. The default value is `/opt/soft/flink`.
+
+**`DATAX_HOME`**
+
+This environment variable sets `DATAX_HOME`. The default value is `/opt/soft/datax`.
+
+### Master Server
+
+**`MASTER_SERVER_OPTS`**
+
+This environment variable sets JVM options for `master-server`. The default value is `-Xms1g -Xmx1g -Xmn512m`.
+
+**`MASTER_EXEC_THREADS`**
+
+This environment variable sets exec thread number for `master-server`. The default value is `100`.
+
+**`MASTER_EXEC_TASK_NUM`**
+
+This environment variable sets exec task number for `master-server`. The default value is `20`.
+
+**`MASTER_DISPATCH_TASK_NUM`**
+
+This environment variable sets dispatch task number for `master-server`. The default value is `3`.
+
+**`MASTER_HOST_SELECTOR`**
+
+This environment variable sets host selector for `master-server`. Optional values include `Random`, `RoundRobin` and `LowerWeight`. The default value is `LowerWeight`.
+
+**`MASTER_HEARTBEAT_INTERVAL`**
+
+This environment variable sets heartbeat interval for `master-server`. The default value is `10`.
+
+**`MASTER_TASK_COMMIT_RETRYTIMES`**
+
+This environment variable sets task commit retry times for `master-server`. The default value is `5`.
+
+**`MASTER_TASK_COMMIT_INTERVAL`**
+
+This environment variable sets task commit interval for `master-server`. The default value is `1`.
+
+**`MASTER_MAX_CPULOAD_AVG`**
+
+This environment variable sets max CPU load avg for `master-server`. The default value is `-1`.
+
+**`MASTER_RESERVED_MEMORY`**
+
+This environment variable sets reserved memory for `master-server`, the unit is G. The default value is `0.3`.
+
+### Worker Server
+
+**`WORKER_SERVER_OPTS`**
+
+This environment variable sets JVM options for `worker-server`. The default value is `-Xms1g -Xmx1g -Xmn512m`.
+
+**`WORKER_EXEC_THREADS`**
+
+This environment variable sets exec thread number for `worker-server`. The default value is `100`.
+
+**`WORKER_HEARTBEAT_INTERVAL`**
+
+This environment variable sets heartbeat interval for `worker-server`. The default value is `10`.
+
+**`WORKER_MAX_CPULOAD_AVG`**
+
+This environment variable sets max CPU load avg for `worker-server`. The default value is `-1`.
+
+**`WORKER_RESERVED_MEMORY`**
+
+This environment variable sets reserved memory for `worker-server`, the unit is G. The default value is `0.3`.
+
+**`WORKER_GROUPS`**
+
+This environment variable sets groups for `worker-server`. The default value is `default`.
+
+### Alert Server
+
+**`ALERT_SERVER_OPTS`**
+
+This environment variable sets JVM options for `alert-server`. The default value is `-Xms512m -Xmx512m -Xmn256m`.
+
+**`XLS_FILE_PATH`**
+
+This environment variable sets xls file path for `alert-server`. The default value is `/tmp/xls`.
+
+**`MAIL_SERVER_HOST`**
+
+This environment variable sets mail server host for `alert-server`. The default value is empty.
+
+**`MAIL_SERVER_PORT`**
+
+This environment variable sets mail server port for `alert-server`. The default value is empty.
+
+**`MAIL_SENDER`**
+
+This environment variable sets mail sender for `alert-server`. The default value is empty.
+
+**`MAIL_USER`**
+
+This environment variable sets mail user for `alert-server`. The default value is empty.
+
+**`MAIL_PASSWD`**
+
+This environment variable sets mail password for `alert-server`. The default value is empty.
+
+**`MAIL_SMTP_STARTTLS_ENABLE`**
+
+This environment variable sets whether to enable SMTP STARTTLS for `alert-server`. The default value is `true`.
+
+**`MAIL_SMTP_SSL_ENABLE`**
+
+This environment variable sets whether to enable SMTP SSL for `alert-server`. The default value is `false`.
+
+**`MAIL_SMTP_SSL_TRUST`**
+
+This environment variable sets the SMTP SSL trusted host for `alert-server`. The default value is empty.
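+
+A minimal sketch of how the mail variables might be combined in `config.env.sh` for SMTP over SSL; the server, sender and credentials below are illustrative placeholders:
+
+```bash
+# Sketch: SMTP-over-SSL settings for alert-server in config.env.sh (placeholders)
+MAIL_SERVER_HOST=smtp.example.com
+MAIL_SERVER_PORT=465
+MAIL_SENDER=dolphinscheduler@example.com
+MAIL_USER=dolphinscheduler@example.com
+MAIL_PASSWD=your-smtp-password
+MAIL_SMTP_STARTTLS_ENABLE=false
+MAIL_SMTP_SSL_ENABLE=true
+MAIL_SMTP_SSL_TRUST=smtp.example.com
+```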
+
+**`ENTERPRISE_WECHAT_ENABLE`**
+
+This environment variable sets whether to enable enterprise wechat for `alert-server`. The default value is `false`.
+
+**`ENTERPRISE_WECHAT_CORP_ID`**
+
+This environment variable sets enterprise wechat corp id for `alert-server`. The default value is empty.
+
+**`ENTERPRISE_WECHAT_SECRET`**
+
+This environment variable sets enterprise wechat secret for `alert-server`. The default value is empty.
+
+**`ENTERPRISE_WECHAT_AGENT_ID`**
+
+This environment variable sets enterprise wechat agent id for `alert-server`. The default value is empty.
+
+**`ENTERPRISE_WECHAT_USERS`**
+
+This environment variable sets enterprise wechat users for `alert-server`. The default value is empty.
+
+### Api Server
+
+**`API_SERVER_OPTS`**
+
+This environment variable sets JVM options for `api-server`. The default value is `-Xms512m -Xmx512m -Xmn256m`.
+
+### Logger Server
+
+**`LOGGER_SERVER_OPTS`**
+
+This environment variable sets JVM options for `logger-server`. The default value is `-Xms512m -Xmx512m -Xmn256m`.
diff --git a/docs/2.0.9/docs/en/guide/installation/hardware.md b/docs/2.0.9/docs/en/guide/installation/hardware.md
new file mode 100644
index 0000000..0c5df7f
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/installation/hardware.md
@@ -0,0 +1,47 @@
+# Hardware Environment
+
+DolphinScheduler, as an open-source distributed workflow task scheduling system, can be well deployed and run in Intel architecture server environments and mainstream virtualization environments, and supports mainstream Linux operating system environments.
+
+## 1. Linux Operating System Version Requirements
+
+| OS       | Version         |
+| :----------------------- | :----------: |
+| Red Hat Enterprise Linux | 7.0 and above   |
+| CentOS                   | 7.0 and above   |
+| Oracle Enterprise Linux  | 7.0 and above   |
+| Ubuntu LTS               | 16.04 and above |
+
+> **Attention:**
+> The above Linux operating systems can run on physical servers and mainstream virtualization environments such as VMware, KVM, and XEN.
+
+## 2. Recommended Server Configuration
+DolphinScheduler supports 64-bit hardware platforms with Intel x86-64 architecture. The following recommendation is made for server hardware configuration in a production environment:
+### Production Environment
+
+| **CPU** | **MEM** | **HD** | **NIC** | **Num** |
+| --- | --- | --- | --- | --- |
+| 4 core+ | 8 GB+ | SAS | GbE | 1+ |
+
+> **Attention:**
+> - The above-recommended configuration is the minimum for deploying DolphinScheduler. A higher configuration is strongly recommended for production environments.
+> - A hard disk of more than 50 GB is recommended, with the system disk and data disk separated.
+
+
+## 3. Network Requirements
+
+DolphinScheduler provides the following network port configurations for normal operation:
+
+| Server | Port | Desc |
+|  --- | --- | --- |
+| MasterServer |  5678  | Not the communication port. Only requires that the local ports do not conflict |
+| WorkerServer | 1234  | Not the communication port. Only requires that the local ports do not conflict |
+| ApiApplicationServer |  12345 | Backend communication port |
+
+> **Attention:**
+> - MasterServer and WorkerServer do not need network access to each other; it is enough that their local ports do not conflict.
+> - Administrators can adjust relevant ports on the network side and host-side according to the deployment plan of DolphinScheduler components in the actual environment.
+
+## 4. Browser Requirements
+
+DolphinScheduler recommends Chrome, or the latest browsers based on the Chrome kernel, to access the front-end web UI.
+
diff --git a/docs/2.0.9/docs/en/guide/installation/kubernetes.md b/docs/2.0.9/docs/en/guide/installation/kubernetes.md
new file mode 100644
index 0000000..0c92c8e
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/installation/kubernetes.md
@@ -0,0 +1,765 @@
+# QuickStart in Kubernetes
+
+Kubernetes deployment deploys DolphinScheduler in a Kubernetes cluster, which can schedule a large number of tasks and can be used in production.
+
+If you are new to DolphinScheduler and just want to try it out, we recommend the [Standalone](standalone.md) deployment. If you want to experience more complete functions or schedule a larger number of tasks, we recommend the [pseudo-cluster deployment](pseudo-cluster.md). If you want to use DolphinScheduler in production, we recommend the [cluster deployment](cluster.md) or [Kubernetes](kubernetes.md) deployment
+
+## Prerequisites
+
+ - [Helm](https://helm.sh/) 3.1.0+
+ - [Kubernetes](https://kubernetes.io/) 1.12+
+ - PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+> NOTE: You need to change your `Chart.yaml` locally to make it work. Due to a change in the Bitnami repo,
+> https://charts.bitnami.com/bitnami was truncated to contain only entries for the latest 6 months (from January 2022 on).
+> This URL: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami contains the full 'index.yaml'.
+>
+> See detail here: https://github.com/bitnami/charts/issues/10833.
+>
+> Change your `Chart.yaml` under `apache-dolphinscheduler-2.0.9-src/docker/kubernetes/dolphinscheduler` after you download the source code,
+> and replace `repository: https://charts.bitnami.com/bitnami`
+> with `repository: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami` in both places
+
+Please download the source code package `apache-dolphinscheduler-2.0.9-src.tar.gz`; the download address is [download](/en-us/download/download.html)
+
+To install the chart with the release name `dolphinscheduler`, please execute the following commands:
+
+```
+$ tar -zxvf apache-dolphinscheduler-2.0.9-src.tar.gz
+$ cd apache-dolphinscheduler-2.0.9-src/docker/kubernetes/dolphinscheduler
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm dependency update .
+$ helm install dolphinscheduler . --set image.tag=2.0.9
+```
+
+To install the chart with a namespace named `test`:
+
+```bash
+$ helm install dolphinscheduler . -n test
+```
+
+> **Tip**: If a namespace named `test` is used, the option `-n test` needs to be added to the `helm` and `kubectl` commands
+
+These commands deploy DolphinScheduler on the Kubernetes cluster in the default configuration. The [Appendix-Configuration](#appendix-configuration) section lists the parameters that can be configured during installation.
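+
+Instead of editing `values.yaml`, individual parameters from [Appendix-Configuration](#appendix-configuration) can also be overridden on the command line with `--set`; a small sketch with illustrative values:
+
+```bash
+# Sketch: override chart parameters at install time (values are illustrative)
+helm install dolphinscheduler . \
+  --set image.tag=2.0.9 \
+  --set timezone=UTC \
+  --set master.replicas=2
+```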
+
+> **Tip**: List all releases using `helm list`
+
+The **PostgreSQL** (with username `root`, password `root` and database `dolphinscheduler`) and **ZooKeeper** services will start by default
+
+## Access DolphinScheduler UI
+
+If `ingress.enabled` in `values.yaml` is set to `true`, just access `http://${ingress.host}/dolphinscheduler` in your browser.
+
+> **Tip**: If there is a problem with ingress access, please contact the Kubernetes administrator and refer to the [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/)
+
+Otherwise, when `api.service.type=ClusterIP` you need to execute a port-forward command like:
+
+```bash
+$ kubectl port-forward --address 0.0.0.0 svc/dolphinscheduler-api 12345:12345
+$ kubectl port-forward --address 0.0.0.0 -n test svc/dolphinscheduler-api 12345:12345 # with test namespace
+```
+
+> **Tip**: If the error of `unable to do port forwarding: socat not found` appears, you need to install `socat` at first
+
+And then access the web UI: http://localhost:12345/dolphinscheduler
+
+Or when `api.service.type=NodePort` you need to execute the command:
+
+```bash
+NODE_IP=$(kubectl get no -n {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+NODE_PORT=$(kubectl get svc {{ template "dolphinscheduler.fullname" . }}-api -n {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}")
+echo http://$NODE_IP:$NODE_PORT/dolphinscheduler
+```
+
+And then access the web UI: http://$NODE_IP:$NODE_PORT/dolphinscheduler
+
+The default username is `admin` and the default password is `dolphinscheduler123`
+
+Please refer to the [Quick Start](../quick-start.md) chapter to explore how to use DolphinScheduler
+
+## Uninstalling the Chart
+
+To uninstall/delete the `dolphinscheduler` deployment:
+
+```bash
+$ helm uninstall dolphinscheduler
+```
+
+The command removes all the Kubernetes components associated with the chart (except the PVCs) and deletes the release.
+
+To delete the PVCs associated with `dolphinscheduler`:
+
+```bash
+$ kubectl delete pvc -l app.kubernetes.io/instance=dolphinscheduler
+```
+
+> **Note**: Deleting the PVCs will delete all data as well. Please be cautious before doing it.
+
+## Configuration
+
+The configuration file is `values.yaml`, and the [Appendix-Configuration](#appendix-configuration) table lists the configurable parameters of DolphinScheduler and their default values.
+
+## Support Matrix
+
+| Type                                                         | Support      | Notes                                 |
+| ------------------------------------------------------------ | ------------ | ------------------------------------- |
+| Shell                                                        | Yes          |                                       |
+| Python2                                                      | Yes          |                                       |
+| Python3                                                      | Indirect Yes | Refer to FAQ                          |
+| Hadoop2                                                      | Indirect Yes | Refer to FAQ                          |
+| Hadoop3                                                      | Not Sure     | Not tested                            |
+| Spark-Local(client)                                          | Indirect Yes | Refer to FAQ                          |
+| Spark-YARN(cluster)                                          | Indirect Yes | Refer to FAQ                          |
+| Spark-Standalone(cluster)                                    | Not Yet      |                                       |
+| Spark-Kubernetes(cluster)                                    | Not Yet      |                                       |
+| Flink-Local(local>=1.11)                                     | Not Yet      | Generic CLI mode is not yet supported |
+| Flink-YARN(yarn-cluster)                                     | Indirect Yes | Refer to FAQ                          |
+| Flink-YARN(yarn-session/yarn-per-job/yarn-application>=1.11) | Not Yet      | Generic CLI mode is not yet supported |
+| Flink-Standalone(default)                                    | Not Yet      |                                       |
+| Flink-Standalone(remote>=1.11)                               | Not Yet      | Generic CLI mode is not yet supported |
+| Flink-Kubernetes(default)                                    | Not Yet      |                                       |
+| Flink-Kubernetes(remote>=1.11)                               | Not Yet      | Generic CLI mode is not yet supported |
+| Flink-NativeKubernetes(kubernetes-session/application>=1.11) | Not Yet      | Generic CLI mode is not yet supported |
+| MapReduce                                                    | Indirect Yes | Refer to FAQ                          |
+| Kerberos                                                     | Indirect Yes | Refer to FAQ                          |
+| HTTP                                                         | Yes          |                                       |
+| DataX                                                        | Indirect Yes | Refer to FAQ                          |
+| Sqoop                                                        | Indirect Yes | Refer to FAQ                          |
+| SQL-MySQL                                                    | Indirect Yes | Refer to FAQ                          |
+| SQL-PostgreSQL                                               | Yes          |                                       |
+| SQL-Hive                                                     | Indirect Yes | Refer to FAQ                          |
+| SQL-Spark                                                    | Indirect Yes | Refer to FAQ                          |
+| SQL-ClickHouse                                               | Indirect Yes | Refer to FAQ                          |
+| SQL-Oracle                                                   | Indirect Yes | Refer to FAQ                          |
+| SQL-SQLServer                                                | Indirect Yes | Refer to FAQ                          |
+| SQL-DB2                                                      | Indirect Yes | Refer to FAQ                          |
+
+## FAQ
+
+### How to view the logs of a pod container?
+
+List all pods (aka `po`):
+
+```
+kubectl get po
+kubectl get po -n test # with test namespace
+```
+
+View the logs of a pod container named dolphinscheduler-master-0:
+
+```
+kubectl logs dolphinscheduler-master-0
+kubectl logs -f dolphinscheduler-master-0 # follow log output
+kubectl logs --tail 10 dolphinscheduler-master-0 -n test # show last 10 lines from the end of the logs
+```
+
+### How to scale api, master and worker on Kubernetes?
+
+List all deployments (aka `deploy`):
+
+```
+kubectl get deploy
+kubectl get deploy -n test # with test namespace
+```
+
+Scale api to 3 replicas:
+
+```
+kubectl scale --replicas=3 deploy dolphinscheduler-api
+kubectl scale --replicas=3 deploy dolphinscheduler-api -n test # with test namespace
+```
+
+List all stateful sets (aka `sts`):
+
+```
+kubectl get sts
+kubectl get sts -n test # with test namespace
+```
+
+Scale master to 2 replicas:
+
+```
+kubectl scale --replicas=2 sts dolphinscheduler-master
+kubectl scale --replicas=2 sts dolphinscheduler-master -n test # with test namespace
+```
+
+Scale worker to 6 replicas:
+
+```
+kubectl scale --replicas=6 sts dolphinscheduler-worker
+kubectl scale --replicas=6 sts dolphinscheduler-worker -n test # with test namespace
+```
+
+### How to use MySQL as the DolphinScheduler's database instead of PostgreSQL?
+
+> Because of the commercial license, we cannot directly use the driver of MySQL.
+>
+> If you want to use MySQL, you can build a new image based on the `apache/dolphinscheduler` image as follows.
+
+1. Download the MySQL driver [mysql-connector-java-8.0.16.jar](https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar)
+
+2. Create a new `Dockerfile` to add MySQL driver:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
+```
+
+3. Build a new docker image including MySQL driver:
+
+```
+docker build -t apache/dolphinscheduler:mysql-driver .
+```
+
+4. Push the docker image `apache/dolphinscheduler:mysql-driver` to a docker registry
+
+5. Modify image `repository` and update `tag` to `mysql-driver` in `values.yaml`
+
+6. Modify postgresql `enabled` to `false` in `values.yaml`
+
+7. Modify externalDatabase (especially modify `host`, `username` and `password`) in `values.yaml`:
+
+```yaml
+externalDatabase:
+  type: "mysql"
+  driver: "com.mysql.jdbc.Driver"
+  host: "localhost"
+  port: "3306"
+  username: "root"
+  password: "root"
+  database: "dolphinscheduler"
+  params: "useUnicode=true&characterEncoding=UTF-8"
+```
+
+8. Run a DolphinScheduler release in Kubernetes (See **Installing the Chart**)
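+
+Steps 5-7 can also be expressed as `--set` overrides when installing the chart; parameter names are from [Appendix-Configuration](#appendix-configuration), the registry and database host are placeholders, and the remaining `externalDatabase` fields can be set the same way or edited in `values.yaml` as shown above:
+
+```bash
+# Sketch: steps 5-7 as --set overrides (placeholders, adjust to your environment)
+helm install dolphinscheduler . \
+  --set image.repository=REGISTRY/apache/dolphinscheduler \
+  --set image.tag=mysql-driver \
+  --set postgresql.enabled=false \
+  --set externalDatabase.type=mysql \
+  --set externalDatabase.driver=com.mysql.jdbc.Driver \
+  --set externalDatabase.host=MYSQL_HOST \
+  --set externalDatabase.username=root \
+  --set externalDatabase.password=root
+```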
+
+### How to support MySQL datasource in `Datasource manage`?
+
+> Because of the commercial license, we cannot directly use the driver of MySQL.
+>
+> If you want to add MySQL datasource, you can build a new image based on the `apache/dolphinscheduler` image as follows.
+
+1. Download the MySQL driver [mysql-connector-java-8.0.16.jar](https://repo1.maven.org/maven2/mysql/mysql-connector-java/8.0.16/mysql-connector-java-8.0.16.jar)
+
+2. Create a new `Dockerfile` to add MySQL driver:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+COPY mysql-connector-java-8.0.16.jar /opt/dolphinscheduler/lib
+```
+
+3. Build a new docker image including MySQL driver:
+
+```
+docker build -t apache/dolphinscheduler:mysql-driver .
+```
+
+4. Push the docker image `apache/dolphinscheduler:mysql-driver` to a docker registry
+
+5. Modify image `repository` and update `tag` to `mysql-driver` in `values.yaml`
+
+6. Run a DolphinScheduler release in Kubernetes (See **Installing the Chart**)
+
+7. Add a MySQL datasource in `Datasource manage`
+
+### How to support Oracle datasource in `Datasource manage`?
+
+> Because of the commercial license, we cannot directly use the driver of Oracle.
+>
+> If you want to add Oracle datasource, you can build a new image based on the `apache/dolphinscheduler` image as follows.
+
+1. Download the Oracle driver [ojdbc8.jar](https://repo1.maven.org/maven2/com/oracle/database/jdbc/ojdbc8/) (such as `ojdbc8-19.9.0.0.jar`)
+
+2. Create a new `Dockerfile` to add Oracle driver:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+COPY ojdbc8-19.9.0.0.jar /opt/dolphinscheduler/lib
+```
+
+3. Build a new docker image including Oracle driver:
+
+```
+docker build -t apache/dolphinscheduler:oracle-driver .
+```
+
+4. Push the docker image `apache/dolphinscheduler:oracle-driver` to a docker registry
+
+5. Modify image `repository` and update `tag` to `oracle-driver` in `values.yaml`
+
+6. Run a DolphinScheduler release in Kubernetes (See **Installing the Chart**)
+
+7. Add an Oracle datasource in `Datasource manage`
+
+### How to support Python 2 pip and custom requirements.txt?
+
+1. Create a new `Dockerfile` to install pip:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+COPY requirements.txt /tmp
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends python-pip && \
+    pip install --no-cache-dir -r /tmp/requirements.txt && \
+    rm -rf /var/lib/apt/lists/*
+```
+
+The command will install the default **pip 18.1**. If you want to upgrade pip, just add one line
+
+```
+    pip install --no-cache-dir -U pip && \
+```
+
+2. Build a new docker image including pip:
+
+```
+docker build -t apache/dolphinscheduler:pip .
+```
+
+3. Push the docker image `apache/dolphinscheduler:pip` to a docker registry
+
+4. Modify image `repository` and update `tag` to `pip` in `values.yaml`
+
+5. Run a DolphinScheduler release in Kubernetes (See **Installing the Chart**)
+
+6. Verify pip under a new Python task
+
+### How to support Python 3?
+
+1. Create a new `Dockerfile` to install Python 3:
+
+```
+FROM dolphinscheduler.docker.scarf.sh/apache/dolphinscheduler:2.0.9
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends python3 && \
+    rm -rf /var/lib/apt/lists/*
+```
+
+The command will install the default **Python 3.7.3**. If you also want to install **pip3**, just replace `python3` with `python3-pip` like
+
+```
+    apt-get install -y --no-install-recommends python3-pip && \
+```
+
+2. Build a new docker image including Python 3:
+
+```
+docker build -t apache/dolphinscheduler:python3 .
+```
+
+3. Push the docker image `apache/dolphinscheduler:python3` to a docker registry
+
+4. Modify image `repository` and update `tag` to `python3` in `values.yaml`
+
+5. Modify `PYTHON_HOME` to `/usr/bin/python3` in `values.yaml`
+
+6. Run a DolphinScheduler release in Kubernetes (See **Installing the Chart**)
+
+7. Verify Python 3 under a new Python task
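+
+Steps 4 and 5 can also be expressed as `--set` overrides when installing the chart; the registry below is a placeholder, and `common.configmap.PYTHON_HOME` is listed in [Appendix-Configuration](#appendix-configuration):
+
+```bash
+# Sketch: steps 4-5 as --set overrides (registry is a placeholder)
+helm install dolphinscheduler . \
+  --set image.repository=REGISTRY/apache/dolphinscheduler \
+  --set image.tag=python3 \
+  --set common.configmap.PYTHON_HOME=/usr/bin/python3
+```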
+
+### How to support Hadoop, Spark, Flink, Hive or DataX?
+
+Take Spark 2.4.7 as an example:
+
+1. Download the Spark 2.4.7 release binary `spark-2.4.7-bin-hadoop2.7.tgz`
+
+2. Ensure that `common.sharedStoragePersistence.enabled` is turned on
+
+3. Run a DolphinScheduler release in Kubernetes (See **Installing the Chart**)
+
+4. Copy the Spark 2.4.7 release binary into the Docker container
+
+```bash
+kubectl cp spark-2.4.7-bin-hadoop2.7.tgz dolphinscheduler-worker-0:/opt/soft
+kubectl cp -n test spark-2.4.7-bin-hadoop2.7.tgz dolphinscheduler-worker-0:/opt/soft # with test namespace
+```
+
+Because the volume `sharedStoragePersistence` is mounted on `/opt/soft`, all files in `/opt/soft` will not be lost
+
+5. Attach to the container and ensure that `SPARK_HOME2` exists
+
+```bash
+kubectl exec -it dolphinscheduler-worker-0 bash
+kubectl exec -n test -it dolphinscheduler-worker-0 bash # with test namespace
+cd /opt/soft
+tar zxf spark-2.4.7-bin-hadoop2.7.tgz
+rm -f spark-2.4.7-bin-hadoop2.7.tgz
+ln -s spark-2.4.7-bin-hadoop2.7 spark2 # or just mv
+$SPARK_HOME2/bin/spark-submit --version
+```
+
+The last command will print the Spark version if everything goes well
+
+6. Verify Spark under a Shell task
+
+```
+$SPARK_HOME2/bin/spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME2/examples/jars/spark-examples_2.11-2.4.7.jar
+```
+
+Check whether the task log contains the output like `Pi is roughly 3.146015`
+
+7. Verify Spark under a Spark task
+
+The file `spark-examples_2.11-2.4.7.jar` needs to be uploaded to the resources first; then create a Spark task with:
+
+- Spark Version: `SPARK2`
+- Main Class: `org.apache.spark.examples.SparkPi`
+- Main Package: `spark-examples_2.11-2.4.7.jar`
+- Deploy Mode: `local`
+
+Similarly, check whether the task log contains the output like `Pi is roughly 3.146015`
+
+8. Verify Spark on YARN
+
+Spark on YARN (Deploy Mode is `cluster` or `client`) requires Hadoop support. Adding Hadoop support works almost the same way as the Spark steps above
+
+Ensure that `$HADOOP_HOME` and `$HADOOP_CONF_DIR` exist
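+
+A quick sketch for checking the Hadoop environment inside the worker pod (add `-n test` for the `test` namespace):
+
+```bash
+# Sketch: check the Hadoop environment inside the worker pod
+kubectl exec -it dolphinscheduler-worker-0 bash
+echo $HADOOP_HOME $HADOOP_CONF_DIR       # both should point to existing directories
+ls $HADOOP_CONF_DIR/core-site.xml        # the Hadoop client configuration should be present
+```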
+
+### How to support Spark 3?
+
+Submitting applications with `spark-submit` works the same way for Spark 1, 2 and 3. In other words, `SPARK_HOME2` means the second `SPARK_HOME`, not the `HOME` of `SPARK2`, so just set `SPARK_HOME2=/path/to/spark3`
+
+Take Spark 3.1.1 as an example:
+
+1. Download the Spark 3.1.1 release binary `spark-3.1.1-bin-hadoop2.7.tgz`
+
+2. Ensure that `common.sharedStoragePersistence.enabled` is turned on
+
+3. Run a DolphinScheduler release in Kubernetes (See **Installing the Chart**)
+
+4. Copy the Spark 3.1.1 release binary into the Docker container
+
+```bash
+kubectl cp spark-3.1.1-bin-hadoop2.7.tgz dolphinscheduler-worker-0:/opt/soft
+kubectl cp -n test spark-3.1.1-bin-hadoop2.7.tgz dolphinscheduler-worker-0:/opt/soft # with test namespace
+```
+
+5. Attach to the container and ensure that `SPARK_HOME2` exists
+
+```bash
+kubectl exec -it dolphinscheduler-worker-0 bash
+kubectl exec -n test -it dolphinscheduler-worker-0 bash # with test namespace
+cd /opt/soft
+tar zxf spark-3.1.1-bin-hadoop2.7.tgz
+rm -f spark-3.1.1-bin-hadoop2.7.tgz
+ln -s spark-3.1.1-bin-hadoop2.7 spark2 # or just mv
+$SPARK_HOME2/bin/spark-submit --version
+```
+
+The last command will print the Spark version if everything goes well
+
+6. Verify Spark under a Shell task
+
+```
+$SPARK_HOME2/bin/spark-submit --class org.apache.spark.examples.SparkPi $SPARK_HOME2/examples/jars/spark-examples_2.12-3.1.1.jar
+```
+
+Check whether the task log contains the output like `Pi is roughly 3.146015`
+
+### How to support shared storage between Master, Worker and Api server?
+
+For example, the Master, Worker and API server may all need to use Hadoop at the same time
+
+1. Modify the following configurations in `values.yaml`
+
+```yaml
+common:
+  sharedStoragePersistence:
+    enabled: true
+    mountPath: "/opt/soft"
+    accessModes:
+    - "ReadWriteMany"
+    storageClassName: "-"
+    storage: "20Gi"
+```
+
+`storageClassName` and `storage` need to be modified to actual values
+
+> **Note**: `storageClassName` must support the access mode: `ReadWriteMany`
+
+2. Copy the Hadoop into the directory `/opt/soft`
+
+3. Ensure that `$HADOOP_HOME` and `$HADOOP_CONF_DIR` are correct
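+
+A minimal sketch of steps 2 and 3, assuming a `hadoop-3.2.2.tar.gz` archive (add `-n <namespace>` to the `kubectl` commands if needed):
+
+```bash
+# Sketch: put Hadoop into the shared /opt/soft volume and check the task environment
+kubectl cp hadoop-3.2.2.tar.gz dolphinscheduler-worker-0:/opt/soft
+kubectl exec -it dolphinscheduler-worker-0 bash
+cd /opt/soft
+tar zxf hadoop-3.2.2.tar.gz && rm -f hadoop-3.2.2.tar.gz
+ln -s hadoop-3.2.2 hadoop              # matches the default HADOOP_HOME=/opt/soft/hadoop
+echo $HADOOP_HOME $HADOOP_CONF_DIR
+```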
+
+### How to support local file resource storage instead of HDFS and S3?
+
+Modify the following configurations in `values.yaml`
+
+```yaml
+common:
+  configmap:
+    RESOURCE_STORAGE_TYPE: "HDFS"
+    RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
+    FS_DEFAULT_FS: "file:///"
+  fsFileResourcePersistence:
+    enabled: true
+    accessModes:
+    - "ReadWriteMany"
+    storageClassName: "-"
+    storage: "20Gi"
+```
+
+`storageClassName` and `storage` need to be modified to actual values
+
+> **Note**: `storageClassName` must support the access mode: `ReadWriteMany`
+
+### How to support S3 resource storage like MinIO?
+
+Take MinIO as an example: Modify the following configurations in `values.yaml`
+
+```yaml
+common:
+  configmap:
+    RESOURCE_STORAGE_TYPE: "S3"
+    RESOURCE_UPLOAD_PATH: "/dolphinscheduler"
+    FS_DEFAULT_FS: "s3a://BUCKET_NAME"
+    FS_S3A_ENDPOINT: "http://MINIO_IP:9000"
+    FS_S3A_ACCESS_KEY: "MINIO_ACCESS_KEY"
+    FS_S3A_SECRET_KEY: "MINIO_SECRET_KEY"
+```
+
+`BUCKET_NAME`, `MINIO_IP`, `MINIO_ACCESS_KEY` and `MINIO_SECRET_KEY` need to be modified to actual values
+
+> **Note**: `MINIO_IP` can only be an IP address, not a domain name, because DolphinScheduler currently doesn't support S3 path-style access
+
+### How to configure SkyWalking?
+
+Modify the SkyWalking configurations in `values.yaml`:
+
+```yaml
+common:
+  configmap:
+    SKYWALKING_ENABLE: "true"
+    SW_AGENT_COLLECTOR_BACKEND_SERVICES: "127.0.0.1:11800"
+    SW_GRPC_LOG_SERVER_HOST: "127.0.0.1"
+    SW_GRPC_LOG_SERVER_PORT: "11800"
+```
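+
+The same settings can also be applied to an existing release with `helm upgrade`; a small sketch using `--set-string` so the configmap values stay strings (the backend address is illustrative):
+
+```bash
+# Sketch: enable SkyWalking on an existing release (run from the chart directory)
+# add --reuse-values if you want to keep previously overridden values
+helm upgrade dolphinscheduler . \
+  --set-string common.configmap.SKYWALKING_ENABLE=true \
+  --set-string common.configmap.SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800 \
+  --set-string common.configmap.SW_GRPC_LOG_SERVER_HOST=127.0.0.1 \
+  --set-string common.configmap.SW_GRPC_LOG_SERVER_PORT=11800
+```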
+
+## Appendix-Configuration
+
+| Parameter                                                                         | Description                                                                                                                    | Default                                               |
+| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
+| `timezone`                                                                        | World time and date for cities in all time zones                                                                               | `Asia/Shanghai`                                       |
+|                                                                                   |                                                                                                                                |                                                       |
+| `image.repository`                                                                | Docker image repository for the DolphinScheduler                                                                               | `apache/dolphinscheduler`                             |
+| `image.tag`                                                                       | Docker image version for the DolphinScheduler                                                                                  | `latest`                                              |
+| `image.pullPolicy`                                                                | Image pull policy. One of Always, Never, IfNotPresent                                                                          | `IfNotPresent`                                        |
+| `image.pullSecret`                                                                | Image pull secret. An optional reference to secret in the same namespace to use for pulling any of the images                  | `nil`                                                 |
+|                                                                                   |                                                                                                                                |                                                       |
+| `postgresql.enabled`                                                              | If there is no external PostgreSQL, by default, the DolphinScheduler will use an internal PostgreSQL                           | `true`                                                |
+| `postgresql.postgresqlUsername`                                                   | The username for internal PostgreSQL                                                                                           | `root`                                                |
+| `postgresql.postgresqlPassword`                                                   | The password for internal PostgreSQL                                                                                           | `root`                                                |
+| `postgresql.postgresqlDatabase`                                                   | The database for internal PostgreSQL                                                                                           | `dolphinscheduler`                                    |
+| `postgresql.persistence.enabled`                                                  | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL                                   | `false`                                               |
+| `postgresql.persistence.size`                                                     | `PersistentVolumeClaim` size                                                                                                   | `20Gi`                                                |
+| `postgresql.persistence.storageClass`                                             | PostgreSQL data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning      | `-`                                                   |
+| `externalDatabase.type`                                                           | If there is an external PostgreSQL and `postgresql.enabled` is `false`, DolphinScheduler will use this database type           | `postgresql`                                          |
+| `externalDatabase.driver`                                                         | If there is an external PostgreSQL and `postgresql.enabled` is `false`, DolphinScheduler will use this database driver         | `org.postgresql.Driver`                               |
+| `externalDatabase.host`                                                           | If there is an external PostgreSQL and `postgresql.enabled` is `false`, DolphinScheduler will use this database host           | `localhost`                                           |
+| `externalDatabase.port`                                                           | If there is an external PostgreSQL and `postgresql.enabled` is `false`, DolphinScheduler will use this database port           | `5432`                                                |
+| `externalDatabase.username`                                                       | If there is an external PostgreSQL and `postgresql.enabled` is `false`, DolphinScheduler will use this database username       | `root`                                                |
+| `externalDatabase.password`                                                       | If there is an external PostgreSQL and `postgresql.enabled` is `false`, DolphinScheduler will use this database password       | `root`                                                |
+| `externalDatabase.database`                                                       | If there is an external PostgreSQL and `postgresql.enabled` is `false`, DolphinScheduler will use this database name           | `dolphinscheduler`                                    |
+| `externalDatabase.params`                                                         | If there is an external PostgreSQL and `postgresql.enabled` is `false`, DolphinScheduler will use these connection params      | `characterEncoding=utf8`                              |
+|                                                                                   |                                                                                                                                |                                                       |
+| `zookeeper.enabled`                                                               | If there is no external Zookeeper, by default, the DolphinScheduler will use an internal Zookeeper                             | `true`                                                |
+| `zookeeper.fourlwCommandsWhitelist`                                               | A list of comma separated Four Letter Words commands to use                                                                    | `srvr,ruok,wchs,cons`                                 |
+| `zookeeper.persistence.enabled`                                                   | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper                                     | `false`                                               |
+| `zookeeper.persistence.size`                                                      | `PersistentVolumeClaim` size                                                                                                   | `20Gi`                                                |
+| `zookeeper.persistence.storageClass`                                              | Zookeeper data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning       | `-`                                                   |
+| `zookeeper.zookeeperRoot`                                                         | Specify dolphinscheduler root directory in Zookeeper                                                                           | `/dolphinscheduler`                                   |
+| `externalZookeeper.zookeeperQuorum`                                               | If there is an external Zookeeper and `zookeeper.enabled` is `false`, specify the Zookeeper quorum                             | `127.0.0.1:2181`                                      |
+| `externalZookeeper.zookeeperRoot`                                                 | If there is an external Zookeeper and `zookeeper.enabled` is `false`, specify the dolphinscheduler root directory in Zookeeper | `/dolphinscheduler`                                   |
+|                                                                                   |                                                                                                                                |                                                       |
+| `common.configmap.DOLPHINSCHEDULER_OPTS`                                          | The jvm options for dolphinscheduler, suitable for all servers                                                                 | `""`                                                  |
+| `common.configmap.DATA_BASEDIR_PATH`                                              | User data directory path, self configuration, please make sure the directory exists and have read write permissions            | `/tmp/dolphinscheduler`                               |
+| `common.configmap.RESOURCE_STORAGE_TYPE`                                          | Resource storage type: HDFS, S3, NONE                                                                                          | `HDFS`                                                |
+| `common.configmap.RESOURCE_UPLOAD_PATH`                                           | Resource store on HDFS/S3 path, please make sure the directory exists on hdfs and have read write permissions                  | `/dolphinscheduler`                                   |
+| `common.configmap.FS_DEFAULT_FS`                                                  | Resource storage file system like `file:///`, `hdfs://mycluster:8020` or `s3a://dolphinscheduler`                              | `file:///`                                            |
+| `common.configmap.FS_S3A_ENDPOINT`                                                | S3 endpoint when `common.configmap.RESOURCE_STORAGE_TYPE` is set to `S3`                                                       | `s3.xxx.amazonaws.com`                                |
+| `common.configmap.FS_S3A_ACCESS_KEY`                                              | S3 access key when `common.configmap.RESOURCE_STORAGE_TYPE` is set to `S3`                                                     | `xxxxxxx`                                             |
+| `common.configmap.FS_S3A_SECRET_KEY`                                              | S3 secret key when `common.configmap.RESOURCE_STORAGE_TYPE` is set to `S3`                                                     | `xxxxxxx`                                             |
+| `common.configmap.HADOOP_SECURITY_AUTHENTICATION_STARTUP_STATE`                   | Whether to startup kerberos                                                                                                    | `false`                                               |
+| `common.configmap.JAVA_SECURITY_KRB5_CONF_PATH`                                   | The java.security.krb5.conf path                                                                                               | `/opt/krb5.conf`                                      |
+| `common.configmap.LOGIN_USER_KEYTAB_USERNAME`                                     | The login user from keytab username                                                                                            | `hdfs@HADOOP.COM`                                     |
+| `common.configmap.LOGIN_USER_KEYTAB_PATH`                                         | The login user from keytab path                                                                                                | `/opt/hdfs.keytab`                                    |
+| `common.configmap.KERBEROS_EXPIRE_TIME`                                           | The kerberos expire time, the unit is hour                                                                                     | `2`                                                   |
+| `common.configmap.HDFS_ROOT_USER`                                                 | The HDFS root user who must have the permission to create directories under the HDFS root path                                 | `hdfs`                                                |
+| `common.configmap.RESOURCE_MANAGER_HTTPADDRESS_PORT`                              | Set resource manager httpaddress port for yarn                                                                                 | `8088`                                                |
+| `common.configmap.YARN_RESOURCEMANAGER_HA_RM_IDS`                                 | If resourcemanager HA is enabled, please set the HA IPs                                                                        | `nil`                                                 |
+| `common.configmap.YARN_APPLICATION_STATUS_ADDRESS`                                | If resourcemanager is single, you only need to replace ds1 with the actual resourcemanager hostname, otherwise keep the default | `http://ds1:%s/ws/v1/cluster/apps/%s`               |
+| `common.configmap.SKYWALKING_ENABLE`                                              | Set whether to enable skywalking                                                                                               | `false`                                               |
+| `common.configmap.SW_AGENT_COLLECTOR_BACKEND_SERVICES`                            | Set agent collector backend services for skywalking                                                                            | `127.0.0.1:11800`                                     |
+| `common.configmap.SW_GRPC_LOG_SERVER_HOST`                                        | Set grpc log server host for skywalking                                                                                        | `127.0.0.1`                                           |
+| `common.configmap.SW_GRPC_LOG_SERVER_PORT`                                        | Set grpc log server port for skywalking                                                                                        | `11800`                                               |
+| `common.configmap.HADOOP_HOME`                                                    | Set `HADOOP_HOME` for DolphinScheduler's task environment                                                                      | `/opt/soft/hadoop`                                    |
+| `common.configmap.HADOOP_CONF_DIR`                                                | Set `HADOOP_CONF_DIR` for DolphinScheduler's task environment                                                                  | `/opt/soft/hadoop/etc/hadoop`                         |
+| `common.configmap.SPARK_HOME1`                                                    | Set `SPARK_HOME1` for DolphinScheduler's task environment                                                                      | `/opt/soft/spark1`                                    |
+| `common.configmap.SPARK_HOME2`                                                    | Set `SPARK_HOME2` for DolphinScheduler's task environment                                                                      | `/opt/soft/spark2`                                    |
+| `common.configmap.PYTHON_HOME`                                                    | Set `PYTHON_HOME` for DolphinScheduler's task environment                                                                      | `/usr/bin/python`                                     |
+| `common.configmap.JAVA_HOME`                                                      | Set `JAVA_HOME` for DolphinScheduler's task environment                                                                        | `/usr/local/openjdk-8`                                |
+| `common.configmap.HIVE_HOME`                                                      | Set `HIVE_HOME` for DolphinScheduler's task environment                                                                        | `/opt/soft/hive`                                      |
+| `common.configmap.FLINK_HOME`                                                     | Set `FLINK_HOME` for DolphinScheduler's task environment                                                                       | `/opt/soft/flink`                                     |
+| `common.configmap.DATAX_HOME`                                                     | Set `DATAX_HOME` for DolphinScheduler's task environment                                                                       | `/opt/soft/datax`                                     |
+| `common.sharedStoragePersistence.enabled`                                         | Set `common.sharedStoragePersistence.enabled` to `true` to mount a shared storage volume for the Hadoop and Spark binaries, etc. | `false`                                               |
+| `common.sharedStoragePersistence.mountPath`                                       | The mount path for the shared storage volume                                                                                   | `/opt/soft`                                           |
+| `common.sharedStoragePersistence.accessModes`                                     | `PersistentVolumeClaim` access modes, must be `ReadWriteMany`                                                                  | `[ReadWriteMany]`                                     |
+| `common.sharedStoragePersistence.storageClassName`                                | Shared Storage persistent volume storage class, must support the access mode: ReadWriteMany                                    | `-`                                                   |
+| `common.sharedStoragePersistence.storage`                                         | `PersistentVolumeClaim` size                                                                                                   | `20Gi`                                                |
+| `common.fsFileResourcePersistence.enabled`                                        | Set `common.fsFileResourcePersistence.enabled` to `true` to mount a new file resource volume for `api` and `worker`            | `false`                                               |
+| `common.fsFileResourcePersistence.accessModes`                                    | `PersistentVolumeClaim` access modes, must be `ReadWriteMany`                                                                  | `[ReadWriteMany]`                                     |
+| `common.fsFileResourcePersistence.storageClassName`                               | Resource persistent volume storage class, must support the access mode: ReadWriteMany                                          | `-`                                                   |
+| `common.fsFileResourcePersistence.storage`                                        | `PersistentVolumeClaim` size                                                                                                   | `20Gi`                                                |
+|                                                                                   |                                                                                                                                |                                                       |
+| `master.podManagementPolicy`                                                      | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down  | `Parallel`                                            |
+| `master.replicas`                                                                 | Replicas is the desired number of replicas of the given Template                                                               | `3`                                                   |
+| `master.annotations`                                                              | The `annotations` for master server                                                                                            | `{}`                                                  |
+| `master.affinity`                                                                 | If specified, the pod's scheduling constraints                                                                                 | `{}`                                                  |
+| `master.nodeSelector`                                                             | NodeSelector is a selector which must be true for the pod to fit on a node                                                     | `{}`                                                  |
+| `master.tolerations`                                                              | If specified, the pod's tolerations                                                                                            | `{}`                                                  |
+| `master.resources`                                                                | The `resource` limit and request config for master server                                                                      | `{}`                                                  |
+| `master.configmap.MASTER_SERVER_OPTS`                                             | The jvm options for master server                                                                                              | `-Xms1g -Xmx1g -Xmn512m`                              |
+| `master.configmap.MASTER_EXEC_THREADS`                                            | Master execute thread number to limit process instances                                                                        | `100`                                                 |
+| `master.configmap.MASTER_EXEC_TASK_NUM`                                           | Master execute task number in parallel per process instance                                                                    | `20`                                                  |
+| `master.configmap.MASTER_DISPATCH_TASK_NUM`                                       | Master dispatch task number per batch                                                                                          | `3`                                                   |
+| `master.configmap.MASTER_HOST_SELECTOR`                                           | Master host selector to select a suitable worker, optional values include Random, RoundRobin, LowerWeight                      | `LowerWeight`                                         |
+| `master.configmap.MASTER_HEARTBEAT_INTERVAL`                                      | Master heartbeat interval, the unit is second                                                                                  | `10`                                                  |
+| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES`                                  | Master commit task retry times                                                                                                 | `5`                                                   |
+| `master.configmap.MASTER_TASK_COMMIT_INTERVAL`                                    | Master commit task interval, the unit is second                                                                                | `1`                                                   |
+| `master.configmap.MASTER_MAX_CPULOAD_AVG`                                         | Master max CPU load average; the master server can only schedule when the system CPU load average is lower than this value     | `-1` (`the number of cpu cores * 2`)                  |
+| `master.configmap.MASTER_RESERVED_MEMORY`                                         | Master reserved memory; the master server can only schedule when the system available memory exceeds this value, the unit is G | `0.3`                                                 |
+| `master.livenessProbe.enabled`                                                    | Turn on and off liveness probe                                                                                                 | `true`                                                |
+| `master.livenessProbe.initialDelaySeconds`                                        | Delay before liveness probe is initiated                                                                                       | `30`                                                  |
+| `master.livenessProbe.periodSeconds`                                              | How often to perform the probe                                                                                                 | `30`                                                  |
+| `master.livenessProbe.timeoutSeconds`                                             | When the probe times out                                                                                                       | `5`                                                   |
+| `master.livenessProbe.failureThreshold`                                           | Minimum consecutive failures for the probe                                                                                     | `3`                                                   |
+| `master.livenessProbe.successThreshold`                                           | Minimum consecutive successes for the probe                                                                                    | `1`                                                   |
+| `master.readinessProbe.enabled`                                                   | Turn on and off readiness probe                                                                                                | `true`                                                |
+| `master.readinessProbe.initialDelaySeconds`                                       | Delay before readiness probe is initiated                                                                                      | `30`                                                  |
+| `master.readinessProbe.periodSeconds`                                             | How often to perform the probe                                                                                                 | `30`                                                  |
+| `master.readinessProbe.timeoutSeconds`                                            | When the probe times out                                                                                                       | `5`                                                   |
+| `master.readinessProbe.failureThreshold`                                          | Minimum consecutive failures for the probe                                                                                     | `3`                                                   |
+| `master.readinessProbe.successThreshold`                                          | Minimum consecutive successes for the probe                                                                                    | `1`                                                   |
+| `master.persistentVolumeClaim.enabled`                                            | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master`                                        | `false`                                               |
+| `master.persistentVolumeClaim.accessModes`                                        | `PersistentVolumeClaim` access modes                                                                                           | `[ReadWriteOnce]`                                     |
+| `master.persistentVolumeClaim.storageClassName`                                   | `Master` logs data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning   | `-`                                                   |
+| `master.persistentVolumeClaim.storage`                                            | `PersistentVolumeClaim` size                                                                                                   | `20Gi`                                                |
+|                                                                                   |                                                                                                                                |                                                       |
+| `worker.podManagementPolicy`                                                      | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down  | `Parallel`                                            |
+| `worker.replicas`                                                                 | Replicas is the desired number of replicas of the given Template                                                               | `3`                                                   |
+| `worker.annotations`                                                              | The `annotations` for worker server                                                                                            | `{}`                                                  |
+| `worker.affinity`                                                                 | If specified, the pod's scheduling constraints                                                                                 | `{}`                                                  |
+| `worker.nodeSelector`                                                             | NodeSelector is a selector which must be true for the pod to fit on a node                                                     | `{}`                                                  |
+| `worker.tolerations`                                                              | If specified, the pod's tolerations                                                                                            | `{}`                                                  |
+| `worker.resources`                                                                | The `resource` limit and request config for worker server                                                                      | `{}`                                                  |
+| `worker.configmap.LOGGER_SERVER_OPTS`                                             | The jvm options for logger server                                                                                              | `-Xms512m -Xmx512m -Xmn256m`                          |
+| `worker.configmap.WORKER_SERVER_OPTS`                                             | The jvm options for worker server                                                                                              | `-Xms1g -Xmx1g -Xmn512m`                              |
+| `worker.configmap.WORKER_EXEC_THREADS`                                            | Worker execute thread number to limit task instances                                                                           | `100`                                                 |
+| `worker.configmap.WORKER_HEARTBEAT_INTERVAL`                                      | Worker heartbeat interval, the unit is second                                                                                  | `10`                                                  |
+| `worker.configmap.WORKER_MAX_CPULOAD_AVG`                                         | Worker max CPU load average; tasks can only be dispatched to the worker when the system CPU load average is lower than this value     | `-1` (`the number of cpu cores * 2`)                  |
+| `worker.configmap.WORKER_RESERVED_MEMORY`                                         | Worker reserved memory; tasks can only be dispatched to the worker when the system available memory exceeds this value, the unit is G | `0.3`                                                 |
+| `worker.configmap.WORKER_GROUPS`                                                  | Worker groups                                                                                                                  | `default`                                             |
+| `worker.livenessProbe.enabled`                                                    | Turn on and off liveness probe                                                                                                 | `true`                                                |
+| `worker.livenessProbe.initialDelaySeconds`                                        | Delay before liveness probe is initiated                                                                                       | `30`                                                  |
+| `worker.livenessProbe.periodSeconds`                                              | How often to perform the probe                                                                                                 | `30`                                                  |
+| `worker.livenessProbe.timeoutSeconds`                                             | When the probe times out                                                                                                       | `5`                                                   |
+| `worker.livenessProbe.failureThreshold`                                           | Minimum consecutive failures for the probe                                                                                     | `3`                                                   |
+| `worker.livenessProbe.successThreshold`                                           | Minimum consecutive successes for the probe                                                                                    | `1`                                                   |
+| `worker.readinessProbe.enabled`                                                   | Turn on and off readiness probe                                                                                                | `true`                                                |
+| `worker.readinessProbe.initialDelaySeconds`                                       | Delay before readiness probe is initiated                                                                                      | `30`                                                  |
+| `worker.readinessProbe.periodSeconds`                                             | How often to perform the probe                                                                                                 | `30`                                                  |
+| `worker.readinessProbe.timeoutSeconds`                                            | When the probe times out                                                                                                       | `5`                                                   |
+| `worker.readinessProbe.failureThreshold`                                          | Minimum consecutive failures for the probe                                                                                     | `3`                                                   |
+| `worker.readinessProbe.successThreshold`                                          | Minimum consecutive successes for the probe                                                                                    | `1`                                                   |
+| `worker.persistentVolumeClaim.enabled`                                            | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker`                            | `false`                                               |
+| `worker.persistentVolumeClaim.dataPersistentVolume.enabled`                       | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker`                  | `false`                                               |
+| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes`                   | `PersistentVolumeClaim` access modes                                                                                           | `[ReadWriteOnce]`                                     |
+| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName`              | `Worker` data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning        | `-`                                                   |
+| `worker.persistentVolumeClaim.dataPersistentVolume.storage`                       | `PersistentVolumeClaim` size                                                                                                   | `20Gi`                                                |
+| `worker.persistentVolumeClaim.logsPersistentVolume.enabled`                       | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker`                  | `false`                                               |
+| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes`                   | `PersistentVolumeClaim` access modes                                                                                           | `[ReadWriteOnce]`                                     |
+| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName`              | `Worker` logs data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning   | `-`                                                   |
+| `worker.persistentVolumeClaim.logsPersistentVolume.storage`                       | `PersistentVolumeClaim` size                                                                                                   | `20Gi`                                                |
+|                                                                                   |                                                                                                                                |                                                       |
+| `alert.replicas`                                                                  | Replicas is the desired number of replicas of the given Template                                                               | `1`                                                   |
+| `alert.strategy.type`                                                             | Type of deployment. Can be "Recreate" or "RollingUpdate"                                                                       | `RollingUpdate`                                       |
+| `alert.strategy.rollingUpdate.maxSurge`                                           | The maximum number of pods that can be scheduled above the desired number of pods                                              | `25%`                                                 |
+| `alert.strategy.rollingUpdate.maxUnavailable`                                     | The maximum number of pods that can be unavailable during the update                                                           | `25%`                                                 |
+| `alert.annotations`                                                               | The `annotations` for alert server                                                                                             | `{}`                                                  |
+| `alert.affinity`                                                                  | If specified, the pod's scheduling constraints                                                                                 | `{}`                                                  |
+| `alert.nodeSelector`                                                              | NodeSelector is a selector which must be true for the pod to fit on a node                                                     | `{}`                                                  |
+| `alert.tolerations`                                                               | If specified, the pod's tolerations                                                                                            | `{}`                                                  |
+| `alert.resources`                                                                 | The `resource` limit and request config for alert server                                                                       | `{}`                                                  |
+| `alert.configmap.ALERT_SERVER_OPTS`                                               | The jvm options for alert server                                                                                               | `-Xms512m -Xmx512m -Xmn256m`                          |
+| `alert.configmap.XLS_FILE_PATH`                                                   | XLS file path                                                                                                                  | `/tmp/xls`                                            |
+| `alert.configmap.MAIL_SERVER_HOST`                                                | Mail `SERVER HOST`                                                                                                             | `nil`                                                 |
+| `alert.configmap.MAIL_SERVER_PORT`                                                | Mail `SERVER PORT`                                                                                                             | `nil`                                                 |
+| `alert.configmap.MAIL_SENDER`                                                     | Mail `SENDER`                                                                                                                  | `nil`                                                 |
+| `alert.configmap.MAIL_USER`                                                       | Mail `USER`                                                                                                                    | `nil`                                                 |
+| `alert.configmap.MAIL_PASSWD`                                                     | Mail `PASSWORD`                                                                                                                | `nil`                                                 |
+| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE`                                       | Mail `SMTP STARTTLS` enable                                                                                                    | `false`                                               |
+| `alert.configmap.MAIL_SMTP_SSL_ENABLE`                                            | Mail `SMTP SSL` enable                                                                                                         | `false`                                               |
+| `alert.configmap.MAIL_SMTP_SSL_TRUST`                                             | Mail `SMTP SSL TRUST`                                                                                                          | `nil`                                                 |
+| `alert.configmap.ENTERPRISE_WECHAT_ENABLE`                                        | `Enterprise Wechat` enable                                                                                                     | `false`                                               |
+| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID`                                       | `Enterprise Wechat` corp id                                                                                                    | `nil`                                                 |
+| `alert.configmap.ENTERPRISE_WECHAT_SECRET`                                        | `Enterprise Wechat` secret                                                                                                     | `nil`                                                 |
+| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID`                                      | `Enterprise Wechat` agent id                                                                                                   | `nil`                                                 |
+| `alert.configmap.ENTERPRISE_WECHAT_USERS`                                         | `Enterprise Wechat` users                                                                                                      | `nil`                                                 |
+| `alert.livenessProbe.enabled`                                                     | Turn on and off liveness probe                                                                                                 | `true`                                                |
+| `alert.livenessProbe.initialDelaySeconds`                                         | Delay before liveness probe is initiated                                                                                       | `30`                                                  |
+| `alert.livenessProbe.periodSeconds`                                               | How often to perform the probe                                                                                                 | `30`                                                  |
+| `alert.livenessProbe.timeoutSeconds`                                              | When the probe times out                                                                                                       | `5`                                                   |
+| `alert.livenessProbe.failureThreshold`                                            | Minimum consecutive failures for the probe                                                                                     | `3`                                                   |
+| `alert.livenessProbe.successThreshold`                                            | Minimum consecutive successes for the probe                                                                                    | `1`                                                   |
+| `alert.readinessProbe.enabled`                                                    | Turn on and off readiness probe                                                                                                | `true`                                                |
+| `alert.readinessProbe.initialDelaySeconds`                                        | Delay before readiness probe is initiated                                                                                      | `30`                                                  |
+| `alert.readinessProbe.periodSeconds`                                              | How often to perform the probe                                                                                                 | `30`                                                  |
+| `alert.readinessProbe.timeoutSeconds`                                             | When the probe times out                                                                                                       | `5`                                                   |
+| `alert.readinessProbe.failureThreshold`                                           | Minimum consecutive failures for the probe                                                                                     | `3`                                                   |
+| `alert.readinessProbe.successThreshold`                                           | Minimum consecutive successes for the probe                                                                                    | `1`                                                   |
+| `alert.persistentVolumeClaim.enabled`                                             | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert`                                          | `false`                                               |
+| `alert.persistentVolumeClaim.accessModes`                                         | `PersistentVolumeClaim` access modes                                                                                           | `[ReadWriteOnce]`                                     |
+| `alert.persistentVolumeClaim.storageClassName`                                    | `Alert` logs data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning    | `-`                                                   |
+| `alert.persistentVolumeClaim.storage`                                             | `PersistentVolumeClaim` size                                                                                                   | `20Gi`                                                |
+|                                                                                   |                                                                                                                                |                                                       |
+| `api.replicas`                                                                    | Replicas is the desired number of replicas of the given Template                                                               | `1`                                                   |
+| `api.strategy.type`                                                               | Type of deployment. Can be "Recreate" or "RollingUpdate"                                                                       | `RollingUpdate`                                       |
+| `api.strategy.rollingUpdate.maxSurge`                                             | The maximum number of pods that can be scheduled above the desired number of pods                                              | `25%`                                                 |
+| `api.strategy.rollingUpdate.maxUnavailable`                                       | The maximum number of pods that can be unavailable during the update                                                           | `25%`                                                 |
+| `api.annotations`                                                                 | The `annotations` for api server                                                                                               | `{}`                                                  |
+| `api.affinity`                                                                    | If specified, the pod's scheduling constraints                                                                                 | `{}`                                                  |
+| `api.nodeSelector`                                                                | NodeSelector is a selector which must be true for the pod to fit on a node                                                     | `{}`                                                  |
+| `api.tolerations`                                                                 | If specified, the pod's tolerations                                                                                            | `{}`                                                  |
+| `api.resources`                                                                   | The `resource` limit and request config for api server                                                                         | `{}`                                                  |
+| `api.configmap.API_SERVER_OPTS`                                                   | The jvm options for api server                                                                                                 | `-Xms512m -Xmx512m -Xmn256m`                          |
+| `api.livenessProbe.enabled`                                                       | Turn on and off liveness probe                                                                                                 | `true`                                                |
+| `api.livenessProbe.initialDelaySeconds`                                           | Delay before liveness probe is initiated                                                                                       | `30`                                                  |
+| `api.livenessProbe.periodSeconds`                                                 | How often to perform the probe                                                                                                 | `30`                                                  |
+| `api.livenessProbe.timeoutSeconds`                                                | When the probe times out                                                                                                       | `5`                                                   |
+| `api.livenessProbe.failureThreshold`                                              | Minimum consecutive failures for the probe                                                                                     | `3`                                                   |
+| `api.livenessProbe.successThreshold`                                              | Minimum consecutive successes for the probe                                                                                    | `1`                                                   |
+| `api.readinessProbe.enabled`                                                      | Turn on and off readiness probe                                                                                                | `true`                                                |
+| `api.readinessProbe.initialDelaySeconds`                                          | Delay before readiness probe is initiated                                                                                      | `30`                                                  |
+| `api.readinessProbe.periodSeconds`                                                | How often to perform the probe                                                                                                 | `30`                                                  |
+| `api.readinessProbe.timeoutSeconds`                                               | When the probe times out                                                                                                       | `5`                                                   |
+| `api.readinessProbe.failureThreshold`                                             | Minimum consecutive failures for the probe                                                                                     | `3`                                                   |
+| `api.readinessProbe.successThreshold`                                             | Minimum consecutive successes for the probe                                                                                    | `1`                                                   |
+| `api.persistentVolumeClaim.enabled`                                               | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api`                                              | `false`                                               |
+| `api.persistentVolumeClaim.accessModes`                                           | `PersistentVolumeClaim` access modes                                                                                           | `[ReadWriteOnce]`                                     |
+| `api.persistentVolumeClaim.storageClassName`                                      | `api` logs data persistent volume storage class. If set to "-", storageClassName: "", which disables dynamic provisioning      | `-`                                                   |
+| `api.persistentVolumeClaim.storage`                                               | `PersistentVolumeClaim` size                                                                                                   | `20Gi`                                                |
+| `api.service.type`                                                                | `type` determines how the Service is exposed. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer            | `ClusterIP`                                           |
+| `api.service.clusterIP`                                                           | `clusterIP` is the IP address of the service and is usually assigned randomly by the master                                    | `nil`                                                 |
+| `api.service.nodePort`                                                            | `nodePort` is the port on each node on which this service is exposed when type=NodePort                                        | `nil`                                                 |
+| `api.service.externalIPs`                                                         | `externalIPs` is a list of IP addresses for which nodes in the cluster will also accept traffic for this service               | `[]`                                                  |
+| `api.service.externalName`                                                        | `externalName` is the external reference that kubedns or equivalent will return as a CNAME record for this service             | `nil`                                                 |
+| `api.service.loadBalancerIP`                                                      | `loadBalancerIP` when service.type is LoadBalancer. LoadBalancer will get created with the IP specified in this field          | `nil`                                                 |
+| `api.service.annotations`                                                         | `annotations` may need to be set when service.type is LoadBalancer                                                             | `{}`                                                  |
+|                                                                                   |                                                                                                                                |                                                       |
+| `ingress.enabled`                                                                 | Enable ingress                                                                                                                 | `false`                                               |
+| `ingress.host`                                                                    | Ingress host                                                                                                                   | `dolphinscheduler.org`                                |
+| `ingress.path`                                                                    | Ingress path                                                                                                                   | `/dolphinscheduler`                                   |
+| `ingress.tls.enabled`                                                             | Enable ingress tls                                                                                                             | `false`                                               |
+| `ingress.tls.secretName`                                                          | Ingress tls secret name                                                                                                        | `dolphinscheduler-tls`                                |
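+
+For example, some of these values can be overridden at install time with `--set`. The following is only a sketch: it assumes the chart is installed from its local directory with the release name `dolphinscheduler`, and `dolphinscheduler.example.org` is a placeholder host.
+
+```shell
+# Install with fewer replicas, a NodePort API service and ingress enabled
+helm install dolphinscheduler . \
+  --set master.replicas=1 \
+  --set worker.replicas=1 \
+  --set api.service.type=NodePort \
+  --set ingress.enabled=true \
+  --set ingress.host=dolphinscheduler.example.org
+```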
diff --git a/docs/2.0.9/docs/en/guide/installation/pseudo-cluster.md b/docs/2.0.9/docs/en/guide/installation/pseudo-cluster.md
new file mode 100644
index 0000000..5586978
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/installation/pseudo-cluster.md
@@ -0,0 +1,192 @@
+# Pseudo-Cluster Deployment
+
+The purpose of pseudo-cluster deployment is to deploy the DolphinScheduler service on a single machine. In this mode, the master server, worker server, API server, and logger server all run on the same machine.
+
+If you are new to DolphinScheduler and just want to try it out, we recommend the [Standalone](standalone.md) deployment. If you want to experience more complete functions or schedule a larger number of tasks, we recommend the [pseudo-cluster deployment](pseudo-cluster.md). If you want to use DolphinScheduler in production, we recommend the [cluster deployment](cluster.md) or [Kubernetes deployment](kubernetes.md).
+
+## Prepare
+
+Pseudo-cluster deployment of DolphinScheduler requires the following external software:
+
+* JDK: Download [JDK][jdk] (1.8+), and configure the `JAVA_HOME` and `PATH` environment variables. You can skip this step if it already exists in your environment.
+* Binary package: Download the DolphinScheduler binary package from the [download page](https://dolphinscheduler.apache.org/en-us/download)
+* Database: [PostgreSQL](https://www.postgresql.org/download/) (8.2.15+) or [MySQL](https://dev.mysql.com/downloads/mysql/) (5.7+); you can choose either one. Note that MySQL requires JDBC Driver 8.0.16
+* Registry Center: [ZooKeeper](https://zookeeper.apache.org/releases.html) (3.4.6+), [download link][zookeeper]
+* Process tree analysis
+  * `pstree` for macOS
+  * `psmisc` for Fedora/Red Hat/CentOS/Ubuntu/Debian
+
+> **_Note:_** DolphinScheduler itself does not depend on Hadoop, Hive, Spark, but if you need to run tasks that depend on them, you need to have the corresponding environment support
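+
+For example, you can quickly verify these prerequisites from a shell. This is only a sketch; the package manager commands are assumptions that depend on your distribution.
+
+```shell
+# Check that the JDK is available and JAVA_HOME is set
+java -version
+echo "$JAVA_HOME"
+
+# Install the process tree analysis tool (pick the line matching your OS)
+sudo yum install -y psmisc      # Fedora/Red Hat/CentOS
+sudo apt-get install -y psmisc  # Ubuntu/Debian
+brew install pstree             # macOS
+```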
+
+## DolphinScheduler startup environment
+
+### Configure user exemption and permissions
+
+Create a deployment user, and be sure to configure passwordless `sudo` for it. Here we take the user `dolphinscheduler` as an example.
+
+```shell
+# To create a user, login as root
+useradd dolphinscheduler
+
+# Add password
+echo "dolphinscheduler" | passwd --stdin dolphinscheduler
+
+# Configure sudo without password
+sed -i '$adolphinscheduler  ALL=(ALL)  NOPASSWD: ALL' /etc/sudoers
+sed -i 's/Defaults    requiretty/#Defaults    requiretty/g' /etc/sudoers
+
+# Modify directory permissions and grant permissions for user you created above
+chown -R dolphinscheduler:dolphinscheduler apache-dolphinscheduler-*-bin
+```
+
+> **_NOTICE:_**
+>
+> * Because DolphinScheduler's multi-tenant feature switches users with the command `sudo -u {linux-user}`, the deployment user needs passwordless sudo privileges. If you are a newcomer and do not fully understand this, you can ignore it for now.
+> * If you find the line `Defaults requiretty` in the `/etc/sudoers` file, please comment it out
+
+### Configure machine SSH password-free login
+
+Since resources need to be sent to different machines during installation, SSH password-free login is required between the machines. The steps to configure it are as follows:
+
+```shell
+su dolphinscheduler
+
+ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+chmod 600 ~/.ssh/authorized_keys
+```
+
+> **_Notice:_** After the configuration is complete, you can run `ssh localhost` to verify that it works; if you can log in without entering a password, the configuration was successful.
+
+### Start zookeeper
+
+Go to the ZooKeeper installation directory, copy the configuration file `conf/zoo_sample.cfg` to `conf/zoo.cfg`, and change the value of `dataDir` in `conf/zoo.cfg` to `dataDir=./tmp/zookeeper`.
+
+```shell
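+# Prepare the config first (paths assume you are already in the ZooKeeper installation directory)
+cp conf/zoo_sample.cfg conf/zoo.cfg
+sed -i 's|^dataDir=.*|dataDir=./tmp/zookeeper|' conf/zoo.cfg
+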
+# Start zookeeper
+./bin/zkServer.sh start
+```
+
+## Modify configuration
+
+After completing the preparation of the basic environment, you need to modify the configuration file according to your environment. The configuration file is located at `conf/config/install_config.conf`. Generally, you only need to modify the **INSTALL MACHINE, DolphinScheduler ENV, Database, Registry Server** parts to complete the deployment. The following describes the parameters that must be modified:
+
+```shell
+# ---------------------------------------------------------
+# INSTALL MACHINE
+# ---------------------------------------------------------
+# Because the master, worker, and API server are deployed on a single node, the IP of the server is the machine IP or localhost
+ips="localhost"
+masters="localhost"
+workers="localhost:default"
+alertServer="localhost"
+apiServers="localhost"
+pythonGatewayServers="localhost"
+
+# DolphinScheduler installation path; it will be created automatically if it does not exist
+installPath="~/dolphinscheduler"
+
+# Deployment user; use the user you created in the section **Configure user exemption and permissions**
+deployUser="dolphinscheduler"
+
+# ---------------------------------------------------------
+# DolphinScheduler ENV
+# ---------------------------------------------------------
+# The path of JAVA_HOME, i.e. the JDK installation path from the **Prepare** section
+javaHome="/your/java/home/here"
+
+# ---------------------------------------------------------
+# Database
+# ---------------------------------------------------------
+# Database type, username, password, IP, port, metadata. For now dbtype supports `mysql`, `postgresql` and `H2`
+# Please make sure the configuration values are quoted in double quotation marks, otherwise they may not take effect
+DATABASE_TYPE="mysql"
+SPRING_DATASOURCE_URL="jdbc:mysql://ds1:3306/ds_201_doc?useUnicode=true&characterEncoding=UTF-8"
+# Have to modify if you are not using dolphinscheduler/dolphinscheduler as your username and password
+SPRING_DATASOURCE_USERNAME="dolphinscheduler"
+SPRING_DATASOURCE_PASSWORD="dolphinscheduler"
+
+# ---------------------------------------------------------
+# Registry Server
+# ---------------------------------------------------------
+# Registration center address, the address of zookeeper service
+registryServers="localhost:2181"
+```
+
+## Initialize the database
+
+DolphinScheduler metadata is stored in a relational database. Currently, PostgreSQL and MySQL are supported. If you use MySQL, you need to manually download the [mysql-connector-java driver][mysql] (8.0.16) and move it into the `lib` directory of DolphinScheduler, as sketched below. Let's take MySQL as an example of how to initialize the database.
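+
+A minimal sketch of placing the driver, assuming the jar has been downloaded to the current directory and DolphinScheduler was extracted to `apache-dolphinscheduler-*-bin`:
+
+```shell
+cp mysql-connector-java-8.0.16.jar apache-dolphinscheduler-*-bin/lib/
+```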
+
+```shell
+mysql -uroot -p
+
+mysql> CREATE DATABASE dolphinscheduler DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
+
+# Replace {user} and {password} with the username and password you want to use
+mysql> GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'%' IDENTIFIED BY '{password}';
+mysql> GRANT ALL PRIVILEGES ON dolphinscheduler.* TO '{user}'@'localhost' IDENTIFIED BY '{password}';
+
+mysql> flush privileges;
+```
+
+After the steps above are done, you have created a new database for DolphinScheduler. Then run the following shell script to initialize the database schema:
+
+```shell
+sh script/create-dolphinscheduler.sh
+```
+
+## Start DolphinScheduler
+
+As the **deployment user** you created above, run the following command to complete the deployment; the server logs will be stored in the `logs` folder.
+
+```shell
+sh install.sh
+```
+
+> **_Note:_** During the first deployment, the message `sh: bin/dolphinscheduler-daemon.sh: No such file or directory` may appear up to five times in the terminal. This is harmless and can be ignored.
+
+## Login DolphinScheduler
+
+Access http://localhost:12345/dolphinscheduler in your browser to log in to the DolphinScheduler UI. The default username and password are **admin/dolphinscheduler123**
+
+## Start or stop server
+
+```shell
+# Stop all DolphinScheduler server
+sh ./bin/stop-all.sh
+
+# Start all DolphinScheduler server
+sh ./bin/start-all.sh
+
+# Start or stop DolphinScheduler Master
+sh ./bin/dolphinscheduler-daemon.sh stop master-server
+sh ./bin/dolphinscheduler-daemon.sh start master-server
+
+# Start or stop DolphinScheduler Worker
+sh ./bin/dolphinscheduler-daemon.sh start worker-server
+sh ./bin/dolphinscheduler-daemon.sh stop worker-server
+
+# Start or stop DolphinScheduler Api
+sh ./bin/dolphinscheduler-daemon.sh start api-server
+sh ./bin/dolphinscheduler-daemon.sh stop api-server
+
+# Start or stop Logger
+sh ./bin/dolphinscheduler-daemon.sh start logger-server
+sh ./bin/dolphinscheduler-daemon.sh stop logger-server
+
+# Start or stop Alert
+sh ./bin/dolphinscheduler-daemon.sh start alert-server
+sh ./bin/dolphinscheduler-daemon.sh stop alert-server
+
+# Start or stop Python Gateway Server
+sh ./bin/dolphinscheduler-daemon.sh start python-gateway-server
+sh ./bin/dolphinscheduler-daemon.sh stop python-gateway-server
+```
+
+> **_Note:_** Please refer to the "System Architecture Design" section for details about how each service is used
+
+[jdk]: https://www.oracle.com/technetwork/java/javase/downloads/index.html
+[zookeeper]: https://zookeeper.apache.org/releases.html
+[mysql]: https://downloads.MySQL.com/archives/c-j/
+[issue]: https://github.com/apache/dolphinscheduler/issues/6597
diff --git a/docs/2.0.9/docs/en/guide/installation/standalone.md b/docs/2.0.9/docs/en/guide/installation/standalone.md
new file mode 100644
index 0000000..1c79494
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/installation/standalone.md
@@ -0,0 +1,42 @@
+# Standalone
+
+Standalone is intended only for a quick look at DolphinScheduler.
+
+If you are new to DolphinScheduler and just want to try it out, we recommend the [Standalone](standalone.md) deployment. If you want to experience more complete functions or schedule a larger number of tasks, we recommend the [pseudo-cluster deployment](pseudo-cluster.md). If you want to use DolphinScheduler in production, we recommend the [cluster deployment](cluster.md) or [Kubernetes deployment](kubernetes.md).
+
+> **_Note:_** Standalone is recommended for fewer than 20 workflows, because it uses an embedded H2 database and a ZooKeeper testing server; too many tasks may cause instability
+
+## Prepare
+
+* JDK: Download [JDK][jdk] (1.8+), and configure the `JAVA_HOME` and `PATH` environment variables. You can skip this step if it already exists in your environment.
+* Binary package: Download the DolphinScheduler binary package from the [download page](https://dolphinscheduler.apache.org/en-us/download)
+
+## Start DolphinScheduler Standalone Server
+
+### Extract and start DolphinScheduler
+
+The binary package contains a standalone startup script, so DolphinScheduler can be started quickly after extraction. Switch to a user with sudo permission and run the script:
+
+```shell
+# Extract and start Standalone Server
+tar -xvzf apache-dolphinscheduler-*-bin.tar.gz
+cd apache-dolphinscheduler-*-bin
+sh ./bin/dolphinscheduler-daemon.sh start standalone-server
+```
+
+### Login DolphinScheduler
+
+Access http://localhost:12345/dolphinscheduler in your browser to log in to the DolphinScheduler UI. The default username and password are **admin/dolphinscheduler123**
+
+## Start or stop server
+
+Besides quickly starting the standalone server, the script `./bin/dolphinscheduler-daemon.sh` can also stop it. All the commands are as follows:
+
+```shell
+# Start Standalone Server
+sh ./bin/dolphinscheduler-daemon.sh start standalone-server
+# Stop Standalone Server
+sh ./bin/dolphinscheduler-daemon.sh stop standalone-server
+```
+
+[jdk]: https://www.oracle.com/technetwork/java/javase/downloads/index.html
diff --git a/docs/2.0.9/docs/en/guide/introduction.md b/docs/2.0.9/docs/en/guide/introduction.md
new file mode 100644
index 0000000..052267d
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/introduction.md
@@ -0,0 +1,3 @@
+# User Manual
+
+User Manual shows you how to work with DolphinScheduler. If you have not installed it yet, please see [Quick Start](./quick-start.md) to install DolphinScheduler before going forward.
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/monitor.md b/docs/2.0.9/docs/en/guide/monitor.md
new file mode 100644
index 0000000..2bad35e
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/monitor.md
@@ -0,0 +1,48 @@
+
+# Monitor
+
+## Service management
+
+- Service management mainly monitors and displays the health status and basic information of each service in the system
+
+## Master monitoring
+
+- Mainly displays information related to the master servers.
+<p align="center">
+   <img src="/img/master-jk-en.png" width="80%" />
+ </p>
+
+## Worker monitoring
+
+- Mainly displays information related to the worker servers.
+
+<p align="center">
+   <img src="/img/worker-jk-en.png" width="80%" />
+ </p>
+
+## Zookeeper monitoring
+
+- Mainly displays the configuration information of each worker and master registered in ZooKeeper.
+
+<p align="center">
+   <img src="/img/zookeeper-monitor-en.png" width="80%" />
+ </p>
+
+## DB monitoring
+
+- Mainly displays the health status of the DB
+
+<p align="center">
+   <img src="/img/mysql-jk-en.png" width="80%" />
+ </p>
+
+## Statistics management
+
+<p align="center">
+   <img src="/img/statistics-en.png" width="80%" />
+ </p>
+
+- Number of commands to be executed: statistics on the `t_ds_command` table
+- Number of failed commands: statistics on the `t_ds_error_command` table
+- Number of tasks to run: counts the `task_queue` data in ZooKeeper
+- Number of tasks to be killed: counts the `task_kill` data in ZooKeeper
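+
+If you prefer the command line, the command counts above can also be checked directly in the metadata database. This is only a sketch and assumes a MySQL metadata store with the default database name `dolphinscheduler`:
+
+```shell
+mysql -u dolphinscheduler -p dolphinscheduler -e "
+  SELECT COUNT(*) AS commands_to_run FROM t_ds_command;
+  SELECT COUNT(*) AS failed_commands FROM t_ds_error_command;"
+```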
diff --git a/docs/2.0.9/docs/en/guide/observability/skywalking-agent.md b/docs/2.0.9/docs/en/guide/observability/skywalking-agent.md
new file mode 100644
index 0000000..46c0dca
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/observability/skywalking-agent.md
@@ -0,0 +1,74 @@
+SkyWalking Agent
+=============================
+
+The dolphinscheduler-skywalking module provides a [SkyWalking](https://skywalking.apache.org/) monitoring agent for the DolphinScheduler project.
+
+This document describes how to enable SkyWalking 8.4+ support with this module (SkyWalking 8.5.0 is recommended).
+
+# Installation
+
+The following configuration is used to enable the SkyWalking agent.
+
+### Through environment variable configuration (for Docker Compose)
+
+Modify SkyWalking environment variables in `docker/docker-swarm/config.env.sh`:
+
+```
+SKYWALKING_ENABLE=true
+SW_AGENT_COLLECTOR_BACKEND_SERVICES=127.0.0.1:11800
+SW_GRPC_LOG_SERVER_HOST=127.0.0.1
+SW_GRPC_LOG_SERVER_PORT=11800
+```
+
+And run
+
+```shell
+$ docker-compose up -d
+```
+
+### Through environment variable configuration (for Docker)
+
+```shell
+$ docker run -d --name dolphinscheduler \
+-e DATABASE_HOST="192.168.x.x" -e DATABASE_PORT="5432" -e DATABASE_DATABASE="dolphinscheduler" \
+-e DATABASE_USERNAME="test" -e DATABASE_PASSWORD="test" \
+-e ZOOKEEPER_QUORUM="192.168.x.x:2181" \
+-e SKYWALKING_ENABLE="true" \
+-e SW_AGENT_COLLECTOR_BACKEND_SERVICES="your.skywalking-oap-server.com:11800" \
+-e SW_GRPC_LOG_SERVER_HOST="your.skywalking-log-reporter.com" \
+-e SW_GRPC_LOG_SERVER_PORT="11800" \
+-p 12345:12345 \
+apache/dolphinscheduler:2.0.9 all
+```
+
+### Through install_config.conf configuration (for DolphinScheduler install.sh)
+
+Add the following configurations to `${workDir}/conf/config/install_config.conf`.
+
+```properties
+
+# SkyWalking config
+# note: enable SkyWalking tracking plugin
+enableSkywalking="true"
+# note: configure SkyWalking backend service address
+skywalkingServers="your.skywalking-oap-server.com:11800"
+# note: configure SkyWalking log reporter host
+skywalkingLogReporterHost="your.skywalking-log-reporter.com"
+# note: configure SkyWalking log reporter port
+skywalkingLogReporterPort="11800"
+
+```
+
+# Usage
+
+### Import Dashboard
+
+#### Import DolphinScheduler Dashboard to SkyWalking Server
+
+Copy the `${dolphinscheduler.home}/ext/skywalking-agent/dashboard/dolphinscheduler.yml` file into the `${skywalking-oap-server.home}/config/ui-initialized-templates/` directory, and then restart the SkyWalking OAP server.
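+
+A sketch of the copy step; the two `${...}` placeholders stand for your actual installation directories:
+
+```shell
+# Replace the placeholders with real paths before running
+cp ${dolphinscheduler.home}/ext/skywalking-agent/dashboard/dolphinscheduler.yml \
+   ${skywalking-oap-server.home}/config/ui-initialized-templates/
+```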
+
+#### View DolphinScheduler Dashboard
+
+If you have opened SkyWalking dashboard with a browser before, you need to clear the browser cache.
+
+![img1](/img/skywalking/import-dashboard-1.jpg)
diff --git a/docs/2.0.9/docs/en/guide/open-api.md b/docs/2.0.9/docs/en/guide/open-api.md
new file mode 100644
index 0000000..e93737a
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/open-api.md
@@ -0,0 +1,64 @@
+# Open API
+
+## Background
+Generally, projects and workflows are created through the web pages, but integration with third-party systems requires managing projects and workflows through API calls.
+
+## Operation Steps for DolphinScheduler API Calls
+
+### Create a token
+1. Log in to the scheduling system, click "Security", then click "Token manage" on the left, and click "Create token" to create a token.
+
+<p align="center">
+   <img src="/img/token-management-en.png" width="80%" />
+ </p>
+
+2. Select the "Expiration time" (Token validity), select "User" (to perform the API operation with the specified user), click "Generate token", copy the Token string, and click "Submit"
+
+<p align="center">
+   <img src="/img/create-token-en1.png" width="80%" />
+ </p>
+
+### Use token
+1. Open the API documentation page
+    > Address: http://{api server ip}:12345/dolphinscheduler/doc.html?language=en_US&lang=en
+<p align="center">
+   <img src="/img/api-documentation-en.png" width="80%" />
+ </p>
+ 
+2. Select a test API; the API selected for this test is queryAllProjectList
+    > projects/query-project-list
+
+3. Open Postman, fill in the API address, enter the Token in the Headers, and then send the request to view the result
+    ```
+    token: The Token just generated
+    ```
+<p align="center">
+   <img src="/img/test-api.png" width="80%" />
+ </p>  
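+
+If you prefer the command line to Postman, a request of roughly the following shape also works (a sketch; substitute your own API server address and token):
+
+```shell
+# Query the project list, passing the generated token in the request header
+curl -H "token: <the token just generated>" \
+  "http://{api server ip}:12345/dolphinscheduler/projects/query-project-list"
+```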
+
+### Create a project
+Here is an example of creating a project named "wudl-flink-test":
+<p align="center">
+   <img src="/img/api/create_project1.png" width="80%" />
+ </p>
+
+<p align="center">
+   <img src="/img/api/create_project2.png" width="80%" />
+ </p>
+
+<p align="center">
+   <img src="/img/api/create_project3.png" width="80%" />
+ </p>
+The returned msg is "success", indicating that we have successfully created the project through the API.
+
+If you are interested in the source code of this feature, please continue reading:
+### Appendix: The source code for creating a project
+<p align="center">
+   <img src="/img/api/create_source1.png" width="80%" />
+ </p>
+
+<p align="center">
+   <img src="/img/api/create_source2.png" width="80%" />
+ </p>
+
+
diff --git a/docs/2.0.9/docs/en/guide/parameter/built-in.md b/docs/2.0.9/docs/en/guide/parameter/built-in.md
new file mode 100644
index 0000000..2c88bed
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/parameter/built-in.md
@@ -0,0 +1,48 @@
+# Built-in Parameter
+
+## Basic Built-in Parameter
+
+<table>
+    <tr><th>variable</th><th>declaration method</th><th>meaning</th></tr>
+    <tr>
+        <td>system.biz.date</td>
+        <td>${system.biz.date}</td>
+        <td>The day before the scheduled time of the daily scheduling instance, in yyyyMMdd format</td>
+    </tr>
+    <tr>
+        <td>system.biz.curdate</td>
+        <td>${system.biz.curdate}</td>
+        <td>The scheduled time of the daily scheduling instance, in yyyyMMdd format</td>
+    </tr>
+    <tr>
+        <td>system.datetime</td>
+        <td>${system.datetime}</td>
+        <td>The scheduled time of the daily scheduling instance, in yyyyMMddHHmmss format</td>
+    </tr>
+</table>
+
+## Extended Built-in Parameter
+
+- Custom variable names are supported in the code; the declaration method is \${variable name}. A custom variable can refer to a [basic built-in parameter](#basic-built-in-parameter) or be set to a "constant".
+
+- The benchmark variable uses the \$[...] format. \$[yyyyMMddHHmmss] can be decomposed and combined arbitrarily, for example \$[yyyyMMdd], \$[HHmmss], \$[yyyy-MM-dd], etc.
+
+- Or the following two methods may be useful, as illustrated in the sketch after this list:
+
+      1. Use the add_months(yyyyMMdd, offset) function to add or subtract a number of months.
+      The first parameter of this function is yyyyMMdd, representing the time format the user will get;
+      the second is offset, representing the number of months the user wants to add or subtract.
+      * Next N years: $[add_months(yyyyMMdd,12*N)]
+      * N years before: $[add_months(yyyyMMdd,-12*N)]
+      * Next N months: $[add_months(yyyyMMdd,N)]
+      * N months before: $[add_months(yyyyMMdd,-N)]
+
+      2. Add or subtract numbers directly after the time format.
+      * Next N weeks: $[yyyyMMdd+7*N]
+      * N weeks before: $[yyyyMMdd-7*N]
+      * Next N days: $[yyyyMMdd+N]
+      * N days before: $[yyyyMMdd-N]
+      * Next N hours: $[HHmmss+N/24]
+      * N hours before: $[HHmmss-N/24]
+      * Next N minutes: $[HHmmss+N/24/60]
+      * N minutes before: $[HHmmss-N/24/60]
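+
+As a sketch of how this is typically used: define a custom parameter (here a hypothetical one named `dt`) with the value `$[yyyyMMdd-1]`, then reference it in a shell task script through the usual \${...} syntax:
+
+```shell
+# ${dt} is replaced by DolphinScheduler at run time with yesterday's date in yyyyMMdd format,
+# assuming a custom parameter named dt was defined with the value $[yyyyMMdd-1]
+echo "processing partition dt=${dt}"
+```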
diff --git a/docs/2.0.9/docs/en/guide/parameter/context.md b/docs/2.0.9/docs/en/guide/parameter/context.md
new file mode 100644
index 0000000..062d5cd
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/parameter/context.md
@@ -0,0 +1,63 @@
+# Parameter Context
+
+DolphinScheduler allows parameters to refer to each other, including local parameters referring to global parameters and parameter transfer between upstream and downstream tasks. Because of these references, parameter priority becomes relevant when parameter names are the same; see also [Parameter Priority](priority.md).
+
+## Local task use global parameter
+
+The premise of a local task referencing a global parameter is that you have already defined a [Global Parameter](global.md). The usage is similar to that of [local parameters](local.md), but the value of the parameter needs to be configured as the key of the global parameter.
+
+![parameter-call-global-in-local](/img/global_parameter.png)
+
+As shown in the figure above, `${biz_date}` and `${biz_curdate}` are examples of local parameters referencing global parameters. Observe the last line of the figure: local_param_bizdate uses \${global_bizdate} to refer to the global parameter. In the shell script, you can use \${local_param_bizdate} to refer to the value of the global variable global_bizdate, or set the value of local_param_bizdate directly through JDBC. In the same way, local_param refers to the global parameter defined in the previous section through \${local_param}. biz_date, biz_curdate, and system.datetime are all user-defined parameters that are assigned via \${global parameter}.
+
+## Pass parameter from upstream task to downstream
+
+DolphinScheduler allows parameter transfer between tasks; currently, only one-way transfer from upstream to downstream is supported. The task types that currently support this feature are:
+
+* [Shell](../task/shell.md)
+* [SQL](../task/sql.md)
+* [Procedure](../task/stored-procedure.md)
+
+When defining an upstream node, if the result of that node needs to be passed to a dependent downstream node, you need to set a parameter whose direction is OUT in [Custom Parameters] of [Current Node Settings]. At present, we mainly focus on SQL and SHELL nodes, which can pass parameters out.
+
+prop is user-specified; a parameter is defined as output only when its direction is OUT. The data type can be chosen from different data structures as needed; the value part is not required.
+
+If the result of the SQL node has only one row and one or more fields, the name of the prop needs to be the same as the field name. The data type can be chosen as anything other than LIST. The parameter takes the value of the column in the SQL query result whose name matches the prop.
+
+If the result of the SQL node has multiple rows and one or more fields, the name of the prop needs to be the same as the field name. The data type is selected as LIST; the SQL query result is converted to a LIST and then to JSON, which becomes the value of the corresponding parameter.
+
+Let's take another example of the process that contains the SQL node in the above picture:
+
+The [createParam1] node in the above figure is defined as follows:
+
+![png05](/img/globalParam/image-20210723104957031.png)
+
+The [createParam2] node is defined as follows:
+
+![png06](/img/globalParam/image-20210723105026924.png)
+
+You can find the corresponding node instance on the [Workflow Instance] page to view the value of the variable.
+
+Node instance [createparam1] is as follows:
+
+![png07](/img/globalParam/image-20210723105131381.png)
+
+Here, the value of "id" is equal to 12.
+
+Let's see the case of the node instance [createparam2].
+
+![png08](/img/globalParam/image-20210723105255850.png)
+
+Only the value of "id" is present. Although the user-defined SQL queries both the "id" and "database_name" fields, only the "id" parameter is output because only "id" is defined with direction OUT. For display reasons, the length of the list is shown here as 10.
+
+### SHELL
+
+prop is user-specified. The direction is selected as OUT; the output is defined as a parameter only when the direction is OUT. The data type can be chosen from different data structures as needed; the value part is not required. To pass a parameter, the shell script must output it in the format of the ${setValue(key=value)} statement, where key is the prop of the corresponding parameter and value is the value of that parameter.
+
+For example, `echo '${setValue(trans = Hello trans)}'` sets "trans" to "Hello trans", and the variable trans can then be used in downstream tasks:
+
+<img src="/img/globalParam/trans-shell.png" alt="trans-shell" style="zoom:50%;" />
+
+When the shell node runs, if the log contains output in the ${setValue(key = value1)} format, value1 is assigned to key, and downstream nodes can use the value of the variable key directly. Similarly, you can find the corresponding node instance on the Workflow Instance page to view the value of the variable.
+
+<img src="/img/globalParam/use-parameter-shell.png" alt="use-parameter-shell" style="zoom:50%;" />
diff --git a/docs/2.0.9/docs/en/guide/parameter/global.md b/docs/2.0.9/docs/en/guide/parameter/global.md
new file mode 100644
index 0000000..e9db8e7
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/parameter/global.md
@@ -0,0 +1,19 @@
+# Global Parameter
+
+## Scope
+
+The scope of parameters configured in the workflow definition dialog is the whole workflow.
+
+## Usage
+
+To set global parameters: after defining the workflow, click the 'Save' button, then click the '+' button below 'Set global':
+
+<p align="center">
+   <img src="/img/supplement_global_parameter_en.png" width="80%" />
+ </p>
+
+<p align="center">
+   <img src="/img/local_parameter_en.png" width="80%" />
+ </p>
+
+The global_bizdate parameter defined here can be referenced by the local parameters of any other task node, and the value of global_bizdate is obtained by referencing the system parameter system.biz.date.
diff --git a/docs/2.0.9/docs/en/guide/parameter/local.md b/docs/2.0.9/docs/en/guide/parameter/local.md
new file mode 100644
index 0000000..28887e6
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/parameter/local.md
@@ -0,0 +1,19 @@
+# Local Parameter
+
+## Scope
+
+Parameters configured in the task definition dialog are scoped to that task only. However, if you configure them following [Parameter Context](context.md), they can be passed to downstream tasks.
+
+## Usage
+
+To set local parameters, double-click any node while defining the workflow and click the '+' button next to 'Custom Parameters':
+
+<p align="center">
+     <img src="/img/supplement_local_parameter_en.png" width="80%" />
+</p>
+
+<p align="center">
+     <img src="/img/global_parameter_en.png" width="80%" />
+</p>
+
+If you want to use a [built-in parameter](built-in.md) in a local parameter, fill in the corresponding built-in parameter in `value`, as with `${biz_date}` and `${biz_curdate}` in the figure above.
diff --git a/docs/2.0.9/docs/en/guide/parameter/priority.md b/docs/2.0.9/docs/en/guide/parameter/priority.md
new file mode 100644
index 0000000..69a9b16
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/parameter/priority.md
@@ -0,0 +1,40 @@
+# Parameter Priority
+
+The parameter values used in a DolphinScheduler definition may come from three sources:
+
+* [Global Parameter](global.md): parameters defined when saving the workflow definition
+* [Parameter Context](context.md): parameters passed by upstream nodes
+* [Local Parameter](local.md): the node's own parameters, i.e. the parameters defined by the user in [Custom Parameters]; the user can define their values when defining the workflow
+
+Because a parameter value can come from multiple sources, a priority rule is needed when parameter names are the same. The priority of DolphinScheduler parameters from high to low is: `Startup Parameter > Local Parameter > Parameter Context > Global Parameter`
+
+For parameters passed by upstream tasks, multiple upstream tasks may pass parameters to the same downstream task. When the parameter names passed from upstream are the same:
+
+* Downstream nodes will preferentially use parameters with non-empty values
+* If there are multiple parameters with non-empty values, sort according to the completion time of the upstream task, and select the parameter corresponding to the upstream task with the earliest completion time
+
+## Example
+
+For example, the relationships are shown in the figures below:
+
+1: The first case is illustrated with shell nodes.
+
+![png01](/img/globalParam/image-20210723102938239.png)
+
+The [useParam] node can use the parameters which are set in the [createParam] node. The [useParam] node does not have a dependency on the [noUseParam] node, so it does not get the parameters of the [noUseParam] node. The figure above shows only a shell node example; other node types follow the same usage rules.
+
+![png02](/img/globalParam/image-20210723103316896.png)
+
+Here, the [createParam] node can use parameters directly. In addition, the node sets two parameters named "key" and "key1". The user defines a parameter "key1" with the same name as the one passed by the upstream node and assigns it the value "12". However, due to the priority rule, the value "12" here is discarded and the parameter value set by the upstream node is used in the end.
+
+2: Another situation is illustrated with SQL nodes.
+
+![png03](/img/globalParam/image-20210723103937052.png)
+
+The definition of the [use_create] node is as follows:
+
+![png04](/img/globalParam/image-20210723104411489.png)
+
+"status" is the own parameters of the node set by the current node. However, the user also sets the "status" parameter when saving, assigning its value to -1. Then the value of status will be -1 with higher priority when the SQL is executed. The value of the node's own variable is discarded.
+
+The "ID" here is the parameter set by the upstream node. The user sets the parameters of the same parameter name "ID" for the [createparam1] node and [createparam2] node. And the [use_create] node uses the value of [createParam1] which is finished first.
diff --git a/docs/2.0.9/docs/en/guide/project/project-list.md b/docs/2.0.9/docs/en/guide/project/project-list.md
new file mode 100644
index 0000000..37c7b9f
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/project/project-list.md
@@ -0,0 +1,21 @@
+# Project
+
+## Create project
+
+- Click "Project Management" to enter the project management page, click the "Create Project" button, enter the project name, project description, and click "Submit" to create a new project.
+
+  <p align="center">
+      <img src="/img/create_project_en1.png" width="80%" />
+  </p>
+
+## Project home
+
+- Click the project name link on the project management page to enter the project home page. As shown in the figure below, the project home page contains the project's task status statistics, process status statistics, and workflow definition statistics. These metrics are described below:
+
+- Task status statistics: within the specified time range, count the number of task instances in each status: submitted successfully, running, ready to pause, paused, ready to stop, stopped, failed, succeeded, fault-tolerant, killed, and waiting for thread
+- Process status statistics: within the specified time range, count the number of workflow instances in each status: submitted successfully, running, ready to pause, paused, ready to stop, stopped, failed, succeeded, fault-tolerant, killed, and waiting for thread
+- Workflow definition statistics: count the workflow definitions created by this user and the workflow definitions granted to this user by the administrator
+
+  <p align="center">
+     <img src="/img/project_home_en.png" width="80%" />
+  </p>
diff --git a/docs/2.0.9/docs/en/guide/project/task-instance.md b/docs/2.0.9/docs/en/guide/project/task-instance.md
new file mode 100644
index 0000000..49280f5
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/project/task-instance.md
@@ -0,0 +1,12 @@
+
+# Task instance
+
+- Click Project Management -> Workflow -> Task Instance to enter the task instance page, as shown in the figure below. Click the name of a workflow instance to jump to its DAG chart and view the task status.
+     <p align="center">
+        <img src="/img/task-list-en.png" width="80%" />
+     </p>
+
+- View log: click the "View log" button in the operation column to view the log of the task execution.
+     <p align="center">
+        <img src="/img/task-log2-en.png" width="80%" />
+     </p>
diff --git a/docs/2.0.9/docs/en/guide/project/workflow-definition.md b/docs/2.0.9/docs/en/guide/project/workflow-definition.md
new file mode 100644
index 0000000..590d3e7
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/project/workflow-definition.md
@@ -0,0 +1,114 @@
+# Workflow definition
+
+## Create workflow definition
+
+- Click Project Management -> Workflow -> Workflow Definition to enter the workflow definition page, and click the "Create Workflow" button to enter the **workflow DAG edit** page, as shown in the following figure:
+  <p align="center">
+      <img src="/img/dag5.png" width="80%" />
+  </p>
+- Drag the <img src="/img/tasks/icons/shell.png" width="15"/> icon from the toolbar to the drawing board to add a Shell task, as shown in the figure below:
+
+  ![demo-shell-simple](/img/tasks/demo/shell.jpg)
+
+- **Add parameter settings for this shell task:**
+
+1. Fill in the "Node Name", "Description", and "Script" fields;
+2. Check “Normal” for “Run Flag”. If “Prohibit Execution” is checked, the task will not be executed when the workflow runs;
+3. Select "Task Priority": When the number of worker threads is insufficient, high-level tasks will be executed first in the execution queue, and tasks with the same priority will be executed in the order of first in, first out;
+4. Timeout alarm (optional): Check the timeout alarm, timeout failure, and fill in the "timeout period". When the task execution time exceeds **timeout period**, an alert email will be sent and the task timeout fails;
+5. Resources (optional). Resource files are files created or uploaded on the Resource Center -> File Management page. For example, the file name is `test.sh`, and the command to call the resource in the script is `sh test.sh`;
+6. Customize parameters (optional);
+7. Click the "Confirm Add" button to save the task settings.
+
+- **Set the task execution order:** Click the <img src="/img/line.png" width="35"/> icon in the upper right corner to connect tasks. As shown in the figure below, task 2 and task 3 run in parallel: when task 1 finishes executing, tasks 2 and 3 are executed simultaneously.
+
+  <p align="center">
+     <img src="/img/dag6.png" width="80%" />
+  </p>
+
+- **Delete dependencies:** Click the "arrow" icon in the upper right corner <img src="/img/arrow.png" width="35"/>, select the connection line, and click the "Delete" icon in the upper right corner <img src="/img/delete.png" width="35"/> to delete the dependency between tasks.
+  <p align="center">
+     <img src="/img/dag7.png" width="80%" />
+  </p>
+
+- **Save workflow definition:** Click the "Save" button, and the "Set DAG chart name" pop-up box will pop up, as shown in the figure below. Enter the workflow definition name, workflow definition description, and set global parameters (optional, refer to [global parameters](../parameter/global.md)), click the "Add" button, and the workflow definition is created successfully.
+  <p align="center">
+     <img src="/img/dag8.png" width="80%" />
+   </p>
+> For other types of tasks, please refer to [Task Node Type and Parameter Settings](#TaskParamers). <!-- markdown-link-check-disable-line -->
+
+## Workflow definition operation function
+
+Click Project Management -> Workflow -> Workflow Definition to enter the workflow definition page, as shown below:
+
+<p align="center">
+<img src="/img/work_list_en.png" width="80%" />
+</p>
+The operation functions of the workflow definition list are as follows:
+
+- **Edit:** Only "Offline" workflow definitions can be edited. Workflow DAG editing is the same as [Create Workflow Definition](#create-workflow-definition).
+- **Online:** Used to bring an "Offline" workflow online. Only a workflow in the "Online" state can run; it cannot be edited.
+- **Offline:** Used to take an "Online" workflow offline. Only a workflow in the "Offline" state can be edited; it cannot run.
+- **Run:** Only a workflow in the online state can run. See [Run the Workflow](#run-the-workflow) for the operation steps.
+- **Timing:** Timing can only be set on online workflows; the system then automatically schedules the workflow to run on a regular basis. The status after creating a timing is "Offline", and the timing must be brought online on the timing management page to take effect. See [Workflow Timing](#workflow-timing) for the operation steps.
+- **Timing Management:** On the timing management page, timings can be edited, brought online/offline, and deleted.
+- **Delete:** Delete the workflow definition.
+- **Download:** Download the workflow definition to a local file.
+- **Tree Diagram:** Display the task node type and task status in a tree structure, as shown in the figure below:
+  <p align="center">
+      <img src="/img/tree_en.png" width="80%" />
+  </p>
+
+## Run the workflow
+
+- Click Project Management -> Workflow -> Workflow Definition to enter the workflow definition page, as shown in the figure below, and click the "Go Online" button <img src="/img/online.png" width="35"/> to bring the workflow online.
+  <p align="center">
+      <img src="/img/work_list_en.png" width="80%" />
+  </p>
+
+- Click the "Run" button to pop up the startup parameter setting pop-up box, as shown in the figure below, set the startup parameters, click the "Run" button in the pop-up box, the workflow starts running, and the workflow instance page generates a workflow instance.
+     <p align="center">
+       <img src="/img/run_work_en.png" width="80%" />
+     </p>  
+  Description of workflow operating parameters:
+
+      * Failure strategy: the strategy applied to other parallel task nodes when a task node fails. "Continue" means that after a task fails, other task nodes keep executing normally; "End" means that all running tasks are terminated and the entire process is terminated.
+      * Notification strategy: when the process ends, a notification email with the process execution information is sent according to the process status; the options are: do not send for any status, send on success, send on failure, and send on success or failure.
+      * Process priority: the priority of process execution, divided into five levels: highest (HIGHEST), high (HIGH), medium (MEDIUM), low (LOW), and lowest (LOWEST). When the number of master threads is insufficient, higher-priority processes are executed first in the execution queue, and processes with the same priority are executed in first-in, first-out order.
+      * Worker group: the process can only be executed in the specified worker machine group. The default is Default, which can be executed on any worker.
+      * Notification group: when the notification strategy triggers, a timeout alarm occurs, or fault tolerance occurs, process information or an alarm email is sent to all members of the notification group.
+      * Recipient: when the notification strategy triggers, a timeout alarm occurs, or fault tolerance occurs, process information or an alarm email is sent to the recipient list.
+      * Cc: when the notification strategy triggers, a timeout alarm occurs, or fault tolerance occurs, process information or an alarm email is copied to the CC list.
+      * Startup parameter: set or override global parameter values when starting a new process instance.
+      * Complement: two modes are available, serial complement and parallel complement. Serial complement: within the specified time range, the complements are executed in turn from the start date to the end date, generating N process instances; parallel complement: within the specified time range, multiple days are complemented at the same time, generating N process instances.
+    * For example, you need to backfill the data from May 1 to May 10.
+
+    <p align="center">
+        <img src="/img/complement_en1.png" width="80%" />
+    </p>
+
+  > Serial mode: the complement is executed sequentially from May 1 to May 10, and ten process instances are generated on the process instance page;
+
+  > Parallel mode: the tasks from May 1 to May 10 are executed simultaneously, and ten process instances are generated on the process instance page.
+
+## Workflow timing
+
+- Create timing: click Project Management -> Workflow -> Workflow Definition to enter the workflow definition page, bring the workflow online, and click the "Timing" button <img src="/img/timing.png" width="35"/>. The timing parameter setting dialog box pops up, as shown in the figure below:
+  <p align="center">
+      <img src="/img/time_schedule_en.png" width="80%" />
+  </p>
+- Choose the start and end time. Within the start and end time range, the workflow runs at regular intervals; outside that range, no more scheduled workflow instances are generated.
+- Add a timing that is executed once every day at 5 AM, as shown in the following figure:
+  <p align="center">
+      <img src="/img/timer-en.png" width="80%" />
+  </p>
+- Failure strategy, notification strategy, process priority, worker group, notification group, recipient, and CC are the same as workflow running parameters.
+- Click the "Create" button to create the timing successfully. At this time, the timing status is "**Offline**" and the timing needs to be **Online** to take effect.
+- Timing online: click the "Timing Management" button <img src="/img/timeManagement.png" width="35"/> to enter the timing management page, then click the "Online" button; the timing status changes to "Online", as shown in the figure below, and the workflow is scheduled regularly.
+  <p align="center">
+      <img src="/img/time-manage-list-en.png" width="80%" />
+  </p>
+
+## Import workflow
+
+Click Project Management -> Workflow -> Workflow Definition to enter the workflow definition page, then click the "Import Workflow" button to import a local workflow file; the workflow definition list displays the imported workflow, and its status is offline.
diff --git a/docs/2.0.9/docs/en/guide/project/workflow-instance.md b/docs/2.0.9/docs/en/guide/project/workflow-instance.md
new file mode 100644
index 0000000..4a8c909
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/project/workflow-instance.md
@@ -0,0 +1,62 @@
+# Workflow instance
+
+## View workflow instance
+
+- Click Project Management -> Workflow -> Workflow Instance to enter the Workflow Instance page, as shown in the figure below:
+     <p align="center">
+        <img src="/img/instance-list-en.png" width="80%" />
+     </p>
+- Click the workflow name to enter the DAG view page to view the task execution status, as shown in the figure below.
+  <p align="center">
+    <img src="/img/instance-runs-en.png" width="80%" />
+  </p>
+
+## View task log
+
+- Enter the workflow instance page, click the workflow name, enter the DAG view page, double-click the task node, as shown in the following figure:
+   <p align="center">
+     <img src="/img/instanceViewLog-en.png" width="80%" />
+   </p>
+- Click "View Log", a log pop-up box will pop up, as shown in the figure below, the task log can also be viewed on the task instance page, refer to [Task View Log](./task-instance.md)。
+   <p align="center">
+     <img src="/img/task-log-en.png" width="80%" />
+   </p>
+
+## View task history
+
+- Click Project Management -> Workflow -> Workflow Instance to enter the workflow instance page, and click the workflow name to enter the workflow DAG page;
+- Double-click the task node, as shown in the figure below, and click "View History" to jump to the task instance page, which displays the list of task instances run by this workflow instance
+   <p align="center">
+     <img src="/img/task_history_en.png" width="80%" />
+   </p>
+
+## View operating parameters
+
+- Click Project Management -> Workflow -> Workflow Instance to enter the workflow instance page, and click the workflow name to enter the workflow DAG page;
+- Click the <img src="/img/run_params_button.png" width="35"/> icon in the upper left corner to view the startup parameters of the workflow instance; click the <img src="/img/global_param.png" width="35"/> icon to view the global and local parameters of the workflow instance, as shown in the following figure:
+   <p align="center">
+     <img src="/img/run_params_en.png" width="80%" />
+   </p>
+
+## Workflow instance operation function
+
+Click Project Management -> Workflow -> Workflow Instance to enter the Workflow Instance page, as shown in the figure below:
+
+  <p align="center">
+    <img src="/img/instance-list-en.png" width="80%" />
+  </p>
+
+- **Edit:** Only terminated processes can be edited. Click the "Edit" button or the name of the workflow instance to enter the DAG edit page. After editing, click the "Save" button to open the Save DAG pop-up box, as shown in the figure below. If "Whether to update to workflow definition" is checked when saving, the workflow definition is updated; if it is not checked, the workflow definition is not updated.
+     <p align="center">
+       <img src="/img/editDag-en.png" width="80%" />
+     </p>
+- **Rerun:** Re-execute the terminated process.
+- **Recovery failed:** For failed processes, you can perform a recovery operation that starts from the failed node.
+- **Stop:** **Stop** the running process; the background first sends `kill` to the worker process and then executes a `kill -9` operation.
+- **Pause:** **Pause** the running process; the system status changes to **waiting for execution**, the currently executing tasks are allowed to finish, and the next tasks to be executed are paused.
+- **Resume pause:** Resume the paused process, starting directly from the **paused node**.
+- **Delete:** Delete the workflow instance and the task instances under it.
+- **Gantt chart:** The vertical axis of the Gantt chart is the topological sorting of task instances under a certain workflow instance, and the horizontal axis is the running time of the task instances, as shown in the figure:
+     <p align="center">
+         <img src="/img/gantt-en.png" width="80%" />
+     </p>
diff --git a/docs/2.0.9/docs/en/guide/quick-start.md b/docs/2.0.9/docs/en/guide/quick-start.md
new file mode 100644
index 0000000..facf6b5
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/quick-start.md
@@ -0,0 +1,71 @@
+# Quick Start
+
+* Administrator user login
+
+  > Address: http://localhost:12345/dolphinscheduler  Username and password: admin/dolphinscheduler123
+
+<p align="center">
+   <img src="/img/login_en.png" width="60%" />
+ </p>
+
+* Create queue
+
+<p align="center">
+   <img src="/img/create-queue-en.png" width="60%" />
+ </p>
+
+  * Create tenant
+      <p align="center">
+    <img src="/img/create-tenant-en.png" width="60%" />
+  </p>
+
+  * Create an ordinary user
+<p align="center">
+      <img src="/img/create-user-en.png" width="60%" />
+ </p>
+
+  * Create an alarm group
+
+ <p align="center">
+    <img src="/img/alarm-group-en.png" width="60%" />
+  </p>
+
+  
+  * Create a worker group
+  
+   <p align="center">
+      <img src="/img/worker-group-en.png" width="60%" />
+    </p>
+
+   * Create environment
+
+   <p align="center">
+    <img src="/img/create-environment.png" width="60%" />
+   </p>
+    
+ * Create a token
+  
+   <p align="center">
+      <img src="/img/token-en.png" width="60%" />
+    </p>
+     
+  
+  * Log in with the ordinary user
+  > Click the user name in the upper right corner to "Sign Out", then log in again with the ordinary user.
+
+  * Project Management -> Create Project -> Click on Project Name
+<p align="center">
+      <img src="/img/create_project_en.png" width="60%" />
+ </p>
+
+  * Click Workflow Definition -> Create Workflow Definition -> Online Process Definition
+
+<p align="center">
+   <img src="/img/process_definition_en.png" width="60%" />
+ </p>
+
+  * Run Process Definition -> Click Workflow Instance -> Click Process Instance Name -> Double-click Task Node -> View Task Execution Log
+
+ <p align="center">
+   <img src="/img/log_en.png" width="60%" />
+</p>
diff --git a/docs/2.0.9/docs/en/guide/resource.md b/docs/2.0.9/docs/en/guide/resource.md
new file mode 100644
index 0000000..9c60bd1
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/resource.md
@@ -0,0 +1,120 @@
+# Resource Center
+
+If you want to use the resource upload function, for a standalone deployment you can select a local file directory (this does not require deploying Hadoop). Alternatively, you can upload to a Hadoop or MinIO cluster; in that case, you need Hadoop (2.6+) or MinIO and the related environments.
+
+> **_Note:_**
+>
+> * If the resource upload function is used, the deployment user in [installation and deployment](installation/standalone.md) must have operation permission
+> * If you are using a Hadoop cluster with HA, you need to enable HDFS resource upload and copy the `core-site.xml` and `hdfs-site.xml` from the Hadoop cluster to `/opt/dolphinscheduler/conf`; otherwise, skip this step
+
+## HDFS resource configuration
+
+- Uploaded resource files and UDF functions are all stored on HDFS, so the following configuration items are required:
+
+```
+conf/common/common.properties
+    # Users who have permission to create directories under the HDFS root path
+    hdfs.root.user=hdfs
+    # resource storage base path on HDFS; make sure the directory exists on HDFS and has read/write permissions. "/dolphinscheduler" is recommended
+    data.store2hdfs.basepath=/dolphinscheduler
+    # resource upload startup type : HDFS,S3,NONE
+    res.upload.startup.type=HDFS
+    # whether kerberos starts
+    hadoop.security.authentication.startup.state=false
+    # java.security.krb5.conf path
+    java.security.krb5.conf.path=/opt/krb5.conf
+    # loginUserFromKeytab user
+    login.user.keytab.username=hdfs-mycluster@ESZ.COM
+    # loginUserFromKeytab path
+    login.user.keytab.path=/opt/hdfs.headless.keytab
+
+conf/common/hadoop.properties
+    # ha or single namenode,If namenode ha needs to copy core-site.xml and hdfs-site.xml
+    # to the conf directory,support s3,for example : s3a://dolphinscheduler
+    fs.defaultFS=hdfs://mycluster:8020
+    # for resourcemanager HA, configure the resourcemanager IPs; leave this empty for a single resourcemanager
+    yarn.resourcemanager.ha.rm.ids=192.168.xx.xx,192.168.xx.xx
+    # If it is a single resourcemanager, you only need to configure one host name. If it is resourcemanager HA, the default configuration is fine
+    yarn.application.status.address=http://xxxx:8088/ws/v1/cluster/apps/%s
+
+```
+
+- Only one of yarn.resourcemanager.ha.rm.ids and yarn.application.status.address needs to be configured, and the other is left empty: use yarn.resourcemanager.ha.rm.ids for resourcemanager HA and yarn.application.status.address for a single resourcemanager.
+- You need to copy core-site.xml and hdfs-site.xml from the conf directory of the Hadoop cluster to the conf directory of the dolphinscheduler project, and restart the api-server service.
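+
+On a typical installation this copy step amounts to roughly the following (the source and destination paths are illustrative; adjust them to your Hadoop client and DolphinScheduler conf directories):
+
+```shell
+# Copy the Hadoop client configuration into the DolphinScheduler conf directory
+cp /etc/hadoop/conf/core-site.xml /opt/dolphinscheduler/conf/
+cp /etc/hadoop/conf/hdfs-site.xml /opt/dolphinscheduler/conf/
+# Then restart the api-server service so that the new configuration is loaded
+```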
+
+## File management
+
+> This is the management of various resource files, including creating basic txt/log/sh/conf/py/java files, uploading jar packages and other types of files, and performing edit, rename, download, delete, and other operations.
+
+  <p align="center">
+   <img src="/img/file-manage-en.png" width="80%" />
+ </p>
+
+- Create a file
+  > The file format supports the following types: txt, log, sh, conf, cfg, py, java, sql, xml, hql, properties
+
+<p align="center">
+   <img src="/img/file_create_en.png" width="80%" />
+ </p>
+
+- Upload files
+
+> Upload file: click the "Upload File" button to upload, or drag the file to the upload area; the file name is automatically filled in with the uploaded file name
+
+<p align="center">
+   <img src="/img/file-upload-en.png" width="80%" />
+ </p>
+
+- File View
+
+> For the file types that can be viewed, click the file name to view the file details
+
+<p align="center">
+   <img src="/img/file_detail_en.png" width="80%" />
+ </p>
+
+- Download file
+
+> Click the "Download" button in the file list to download the file or click the "Download" button in the upper right corner of the file details to download the file
+
+- File rename
+
+<p align="center">
+   <img src="/img/file_rename_en.png" width="80%" />
+ </p>
+
+- Delete
+  > File list -> Click the "Delete" button to delete the specified file
+
+- Re-upload file
+
+  > Re-upload file: click the "Re-upload File" button to upload a new file that replaces the old one, or drag the file to the re-upload area; the file name is automatically filled in with the new file name
+
+    <p align="center">
+      <img src="/img/reupload_file_en.png" width="80%" />
+    </p>
+
+## UDF management
+
+### Resource management
+
+> The resource management and file management functions are similar. The difference is that resource management holds the uploaded UDF functions, while file management holds user programs, scripts, and configuration files.
+> Operation functions: rename, download, delete.
+
+- Upload UDF resources
+  > Same as uploading files.
+
+### Function management
+
+- Create UDF function
+  > Click "Create UDF Function", enter the UDF function parameters, select the UDF resource, and click "Submit" to create the UDF function.
+
+> Currently only temporary UDF functions of Hive are supported
+
+- UDF function name: the name entered for the UDF function
+- Package name Class name: enter the full class path of the UDF function
+- UDF resource: set the resource file corresponding to the created UDF function
+
+<p align="center">
+   <img src="/img/udf_edit_en.png" width="80%" />
+ </p>
diff --git a/docs/2.0.9/docs/en/guide/security.md b/docs/2.0.9/docs/en/guide/security.md
new file mode 100644
index 0000000..1e57498
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/security.md
@@ -0,0 +1,163 @@
+
+# Security
+
+* Only the administrator account has the authority to operate the Security Center. It provides functions such as queue management, tenant management, user management, alarm group management, worker group management, and token management, and, in the user management module, the authorization of resources, data sources, projects, and so on.
+* Administrator login, default user name and password: admin/dolphinscheduler123
+
+## Create queue
+
+- Queue is used when the "queue" parameter is needed to execute programs such as spark and mapreduce.
+- The administrator enters the Security Center->Queue Management page and clicks the "Create Queue" button to create a queue.
+<p align="center">
+   <img src="/img/create-queue-en.png" width="80%" />
+ </p>
+
+## Add tenant
+
+- The tenant corresponds to a Linux user, which the worker uses to submit jobs. A task will fail if this user does not exist on Linux. You can set the parameter `worker.tenant.auto.create` to `true` in the configuration file `worker.properties`; DolphinScheduler will then create the user if it does not exist. Note that `worker.tenant.auto.create=true` requires the worker to be able to run `sudo` commands without a password.
+- Tenant Code: **the tenant code is the user on Linux and must be unique; it cannot be repeated**
+- The administrator enters the Security Center->Tenant Management page and clicks the "Create Tenant" button to create a tenant.
+
+ <p align="center">
+    <img src="/img/addtenant-en.png" width="80%" />
+  </p>
+
+## Create normal user
+
+- Users are divided into **administrator users** and **normal users**
+
+  - The administrator has authorization and user management authority, but does not have the authority to create projects and workflow definitions.
+  - Ordinary users can create projects and create, edit, and execute workflow definitions.
+  - Note: if a user switches tenants, all resources under the user's current tenant will be copied to the new tenant.
+
+- The administrator enters the Security Center -> User Management page and clicks the "Create User" button to create a user.
+<p align="center">
+   <img src="/img/user-en.png" width="80%" />
+ </p>
+
+> **Edit user information**
+
+- The administrator enters the Security Center->User Management page and clicks the "Edit" button to edit user information.
+- After an ordinary user logs in, click the user information in the user name drop-down box to enter the user information page, and click the "Edit" button to edit the user information.
+
+> **Modify user password**
+
+- The administrator enters the Security Center->User Management page and clicks the "Edit" button. When editing user information, enter the new password to modify the user password.
+- After a normal user logs in, click the user information in the user name drop-down box to enter the password modification page, enter the new password, confirm the password, and click the "Edit" button; the password is then modified successfully.
+
+## Create alarm group
+
+- The alarm group is a parameter set at startup. After the process ends, the status of the process and other information will be sent to the alarm group in the form of email.
+
+* The administrator enters the Security Center -> Alarm Group Management page and clicks the "Create Alarm Group" button to create an alarm group.
+
+  <p align="center">
+    <img src="/img/mail-en.png" width="80%" />
+
+## Token management
+
+> Since the back-end interface has login check, token management provides a way to perform various operations on the system by calling the interface.
+
+- The administrator enters the Security Center -> Token Management page, clicks the "Create Token" button, selects the expiration time and user, clicks the "Generate Token" button, and clicks the "Submit" button, then the selected user's token is created successfully.
+
+  <p align="center">
+      <img src="/img/create-token-en.png" width="80%" />
+   </p>
+
+- After an ordinary user logs in, click the user information in the user name drop-down box, enter the token management page, select the expiration time, click the "generate token" button, and click the "submit" button, then the user creates a token successfully.
+- Call example:
+
+```java
+// Imports required by this example (Apache HttpClient 4.x); wrap the method in a class of your own to run it
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.http.NameValuePair;
+import org.apache.http.client.entity.UrlEncodedFormEntity;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.message.BasicNameValuePair;
+import org.apache.http.util.EntityUtils;
+
+    /**
+     * test token
+     */
+    public  void doPOSTParam()throws Exception{
+        // create HttpClient
+        CloseableHttpClient httpclient = HttpClients.createDefault();
+
+        // create http post request
+        HttpPost httpPost = new HttpPost("http://127.0.0.1:12345/escheduler/projects/create");
+        httpPost.setHeader("token", "123");
+        // set parameters
+        List<NameValuePair> parameters = new ArrayList<NameValuePair>();
+        parameters.add(new BasicNameValuePair("projectName", "qzw"));
+        parameters.add(new BasicNameValuePair("desc", "qzw"));
+        UrlEncodedFormEntity formEntity = new UrlEncodedFormEntity(parameters);
+        httpPost.setEntity(formEntity);
+        CloseableHttpResponse response = null;
+        try {
+            // execute
+            response = httpclient.execute(httpPost);
+            // response status code 200
+            if (response.getStatusLine().getStatusCode() == 200) {
+                String content = EntityUtils.toString(response.getEntity(), "UTF-8");
+                System.out.println(content);
+            }
+        } finally {
+            if (response != null) {
+                response.close();
+            }
+            httpclient.close();
+        }
+    }
+```
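+
+For reference, a curl call of roughly the same shape as the Java example above (the URL, parameters, and token follow that example; replace them with your own values):
+
+```shell
+# Create a project through the API, passing the token in the request header
+curl -X POST -H "token: 123" \
+  -d "projectName=qzw" -d "desc=qzw" \
+  "http://127.0.0.1:12345/escheduler/projects/create"
+```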
+
+## Granted permission
+
+* Granted permissions include project permissions, resource permissions, data source permissions, and UDF function permissions.
+* The administrator can authorize projects, resources, data sources, and UDF functions that ordinary users did not create. Because the authorization methods for projects, resources, data sources, and UDF functions are the same, we take project authorization as an example.
+* Note: for projects created by users themselves, the user has all permissions, so such projects are not displayed in the project list or the selected project list.
+
+- The administrator enters the Security Center -> User Management page and clicks the "Authorize" button of the user who needs to be authorized, as shown in the figure below:
+ <p align="center">
+  <img src="/img/auth-en.png" width="80%" />
+</p>
+
+- Select the project to authorize the project.
+
+<p align="center">
+   <img src="/img/auth-project-en.png" width="80%" />
+ </p>
+
+- Resources, data sources, and UDF function authorization are the same as project authorization.
+
+## Worker grouping
+
+Each worker node will belong to its own worker group, and the default group is "default".
+
+When the task is executed, the task can be assigned to the specified worker group, and the task will be executed by the worker node in the group.
+
+> Add/Update worker group
+
+- Open the "conf/worker.properties" configuration file on the worker node where you want to set the groups, and modify the "worker.groups" parameter
+- The "worker.groups" parameter is followed by the name of the group corresponding to the worker node, which is “default”.
+- If the worker node corresponds to more than one group, they are separated by commas
+
+```conf
+worker.groups=default,test
+```
+- You can also modify the worker group of a worker in the web UI; if the modification succeeds, the worker uses the new group and ignores the configuration in `worker.properties`. The steps are: Security Center -> Worker Group Management -> click "Create Worker Group" -> enter the group name -> select the existing workers -> click Submit.
+
+## Environmental Management
+
+* Configure the Worker operating environment online. A Worker can specify multiple environments, and each environment is equivalent to the dolphinscheduler_env.sh file.
+
+* The default environment is the dolphinscheduler_env.sh file.
+
+* When a task is executed, it can be assigned to a designated worker group, and the corresponding environment can be selected according to the worker group. The worker node then applies the environment first and executes the task afterwards.
+
+> Add/Update environment
+
+- The environment configuration is equivalent to the configuration in the dolphinscheduler_env.sh file; a minimal sketch of typical contents is shown after the figure below.
+
+  <p align="center">
+      <img src="/img/create-environment.png" width="80%" />
+  </p>
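+
+  A minimal sketch of what such an environment file might contain (the paths are placeholders; adjust them to the software actually installed on your workers):
+
+  ```shell
+  export JAVA_HOME=/opt/soft/java
+  export HADOOP_HOME=/opt/soft/hadoop
+  export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
+  export SPARK_HOME=/opt/soft/spark
+  export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$SPARK_HOME/bin:$PATH
+  ```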
+
+> Use environment
+
+- Create a task node in the workflow definition and select the Worker group and the environment corresponding to that Worker group. When the task is executed, the Worker applies the environment first and then executes the task.
+
+    <p align="center">
+        <img src="/img/use-environment.png" width="80%" />
+    </p>
diff --git a/docs/2.0.9/docs/en/guide/task/conditions.md b/docs/2.0.9/docs/en/guide/task/conditions.md
new file mode 100644
index 0000000..345bee8
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/conditions.md
@@ -0,0 +1,36 @@
+# Conditions
+
+Conditions is a conditional node that determines which downstream task should run based on the condition set for it. For now, the Conditions task supports multiple upstream tasks but only two downstream tasks. When the number of upstream tasks exceeds one, complex upstream dependencies can be achieved through the `and` and `or` operators.
+
+## Create
+
+Drag the <img src="/img/conditions.png" width="20"/> task node from the toolbar to the drawing board to create a new Conditions task, as shown in the figure below:
+
+  <p align="center">
+   <img src="/img/condition_dag_en.png" width="80%" />
+  </p>
+
+  <p align="center">
+   <img src="/img/condition_task_en.png" width="80%" />
+  </p>
+
+## Parameter
+
+- Node name: The node name in a workflow definition is unique.
+- Run flag: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- Descriptive information: describe the function of the node.
+- Task priority: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- Worker grouping: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- Number of failed retry attempts: The number of times the task failed to be resubmitted. It supports drop-down and hand-filling.
+- Failed retry interval: The time interval for resubmitting the task after a failed task. It supports drop-down and hand-filling.
+- Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- Downstream tasks: two branches are supported for now, success and failure
+  - Success: when the Conditions task runs successfully, run this downstream task
+  - Failure: when the Conditions task fails, run this downstream task
+- Upstream condition selection: one or more upstream tasks can be selected for the conditions
+  - Add the upstream dependency: use the first parameter to choose the task name, and the second parameter for the status of the upstream task.
+  - Upstream task relationship: the `and` and `or` operators are used to handle complex upstream relationships when the Conditions task has multiple upstream tasks
+
+## Related task
+
+[Switch](switch.md): The [Conditions](conditions.md) task mainly executes the corresponding branch based on the execution status (success, failure) of the upstream node, while the [Switch](switch.md) task mainly executes the corresponding branch based on the value of a [global parameter](../parameter/global.md) and the result of a judgment expression written by the user.
diff --git a/docs/2.0.9/docs/en/guide/task/datax.md b/docs/2.0.9/docs/en/guide/task/datax.md
new file mode 100644
index 0000000..f6436bc
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/datax.md
@@ -0,0 +1,18 @@
+
+# DATAX
+
+- Drag the <img src="/img/datax.png" width="35"/> task node from the toolbar to the drawing board
+
+  <p align="center">
+   <img src="/img/datax-en.png" width="80%" />
+  </p>
+
+- Custom template: When you turn on the custom template switch, you can customize the content of the json configuration file of the datax node (applicable when the control configuration does not meet the requirements)
+- Data source: select the data source to extract the data
+- sql statement: the sql statement used to extract data from the target database, the sql query column name is automatically parsed when the node is executed, and mapped to the target table synchronization column name. When the source table and target table column names are inconsistent, they can be converted by column alias (as)
+- Target library: select the target library for data synchronization
+- Target table: the name of the target table for data synchronization
+- Pre-sql: Pre-sql is executed before the sql statement (executed by the target library).
+- Post-sql: Post-sql is executed after the sql statement (executed by the target library).
+- json: json configuration file for datax synchronization
+- Custom parameters: for the SQL task type, custom parameters replace \${variable} in the SQL statement, while for the stored procedure type, custom parameters set values for the method in order. The custom parameter types and data types are the same as for the stored procedure task type.
diff --git a/docs/2.0.9/docs/en/guide/task/dependent.md b/docs/2.0.9/docs/en/guide/task/dependent.md
new file mode 100644
index 0000000..97c2940
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/dependent.md
@@ -0,0 +1,27 @@
+# DEPENDENT
+
+- Dependent nodes are **dependency check nodes**. For example, process A depends on the successful execution of process B yesterday, and the dependent node will check whether process B has a successful execution yesterday.
+
+> Drag the ![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_DEPENDENT.png) task node from the toolbar to the drawing board, as shown in the following figure:
+
+<p align="center">
+   <img src="/img/dependent-nodes-en.png" width="80%" />
+ </p>
+
+> The dependent node provides a logical judgment function, such as checking whether the B process was successful yesterday, or whether the C process was executed successfully.
+
+  <p align="center">
+   <img src="/img/depend-node-en.png" width="80%" />
+ </p>
+
+> For example, process A is a weekly report task, processes B and C are daily tasks, and task A requires tasks B and C to be successfully executed every day of the last week, as shown in the figure:
+
+ <p align="center">
+   <img src="/img/depend-node1-en.png" width="80%" />
+ </p>
+
+> If the weekly report A also needs to be executed successfully last Tuesday:
+
+ <p align="center">
+   <img src="/img/depend-node3-en.png" width="80%" />
+ </p>
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/task/flink.md b/docs/2.0.9/docs/en/guide/task/flink.md
new file mode 100644
index 0000000..18c15f0
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/flink.md
@@ -0,0 +1,65 @@
+# Flink
+
+## Overview
+
+Flink task type for executing Flink programs. For Flink nodes, the worker submits the task by using the flink command `flink run`. See [flink cli](https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/deployment/cli/) for more details.
+
+## Create task
+
+- Click Project Management -> Project Name -> Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
+- Drag the <img src="/img/tasks/icons/flink.png" width="15"/> from the toolbar to the drawing board.
+
+## Task Parameter
+
+- **Node name**: The node name in a workflow definition is unique.
+- **Run flag**: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- **Descriptive information**: describe the function of the node.
+- **Task priority**: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- **Worker grouping**: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- **Environment Name**: Configure the environment name in which to run the script.
+- **Number of failed retry attempts**: The number of times the task failed to be resubmitted.
+- **Failed retry interval**: The interval, in minutes, for resubmitting the task after a failure.
+- **Delayed execution time**: The time, in minutes, that the task execution is delayed.
+- **Timeout alarm**: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- **Program type**: supports Java, Scala, and Python.
+- **The class of the main function**: the full path of the Main Class, the entry point of the Flink program.
+- **Resource**: Refers to the list of resource files that need to be called in the script, and the files uploaded or created by the resource center-file management.
+- **Main jar package**: is the Flink jar package.
+- **Deployment mode**: supports cluster and local deployment modes.
+- **Task name** (optional): the Flink task name.
+- **jobManager memory number**: This is used to set the number of jobManager memories, which can be set according to the actual production environment.
+- **Number of slots**: This is used to set the number of Slots, which can be set according to the actual production environment.
+- **taskManager memory number**: This is used to set the number of taskManager memories, which can be set according to the actual production environment.
+- **Number of taskManagers**: This is used to set the number of taskManagers, which can be set according to the actual production environment.
+- **Custom parameters**: user-defined parameters local to the task, which replace the content with ${variable} in the script.
+- **Predecessor task**: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.
+- **Parallelism**: Used to set the degree of parallelism for executing Flink tasks.
+- **Main program parameters**: set the input parameters of the Flink program and support the substitution of custom parameter variables.
+- **Other parameters**: support `--jars`, `--files`, `--archives`, `--conf` format.
+- **Resource**: If the resource file is referenced in other parameters, you need to select and specify it here.
+
+## Task Example
+
+### Execute the WordCount program
+
+This is a common introductory case in the Big Data ecosystem, often applied to computational frameworks such as MapReduce, Flink, and Spark. The main purpose is to count the number of identical words in the input text. (Flink releases ship with this example job.)
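+
+For orientation, below is a hedged sketch of the kind of `flink run` command this node boils down to for the bundled WordCount example; the jar path, main class, and output directory are assumptions that depend on your Flink distribution and layout.
+
+```shell
+# Illustrative only: jar path, main class, and output location are assumptions.
+flink run \
+  -c org.apache.flink.examples.java.wordcount.WordCount \
+  $FLINK_HOME/examples/batch/WordCount.jar \
+  --output /tmp/flink-wordcount-output
+```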
+
+#### Uploading the main package
+
+When using the Flink task node, you will need to use the Resource Center to upload the jar package for the executable. Refer to the [resource center](../resource.md).
+
+After configuring the Resource Center, you can upload the required target files directly using drag and drop.
+
+![resource_upload](/img/tasks/demo/upload_flink.png)
+
+#### Configuring Flink nodes
+
+Simply configure the required content according to the parameter descriptions above.
+
+![demo-flink-simple](/img/tasks/demo/flink_task.png)
+
+## Notice
+
+Java and Scala are only used for identification; there is no difference between them. If the Flink program is developed in Python, there is no main-function class, and the other settings are the same.
diff --git a/docs/2.0.9/docs/en/guide/task/http.md b/docs/2.0.9/docs/en/guide/task/http.md
new file mode 100644
index 0000000..6072e66
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/http.md
@@ -0,0 +1,23 @@
+
+# HTTP
+
+- Drag the <img src="/img/http.png" width="35"/> task node from the toolbar to the drawing board, as shown in the following figure:
+
+<p align="center">
+   <img src="/img/http-en.png" width="80%" />
+ </p>
+
+- Node name: The node name in a workflow definition is unique.
+- Run flag: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- Descriptive information: describe the function of the node.
+- Task priority: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- Worker grouping: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- Number of failed retry attempts: The number of times the task failed to be resubmitted. It supports drop-down and hand-filling.
+- Failed retry interval: The time interval for resubmitting the task after a failed task. It supports drop-down and hand-filling.
+- Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- Request address: the HTTP request URL.
+- Request type: supports GET, POST, HEAD, PUT, and DELETE.
+- Request parameters: supports Parameter, Body, and Headers.
+- Verification conditions: supports default response code, custom response code, content included, and content not included.
+- Verification content: required when the verification condition is custom response code, content included, or content not included.
+- Custom parameter: a user-defined parameter of the HTTP task, which will replace the content with \${variable} in the script (see the illustrative request below).
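+
+Taken together, these settings roughly correspond to the request sketched below with `curl`; the URL, header, and body are placeholders, and the worker performs the request and verification internally rather than shelling out to curl.
+
+```shell
+# Placeholder URL, header, and body for illustration only.
+curl -X POST "https://example.com/api/trigger" \
+  -H "Content-Type: application/json" \
+  -d '{"bizDate": "${variable}"}' \
+  -s -o /tmp/http-task-body -w "%{http_code}"
+# The configured verification condition is then checked against the response code or body.
+```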
diff --git a/docs/2.0.9/docs/en/guide/task/map-reduce.md b/docs/2.0.9/docs/en/guide/task/map-reduce.md
new file mode 100644
index 0000000..3e8c068
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/map-reduce.md
@@ -0,0 +1,66 @@
+# MapReduce
+
+## Overview
+
+- MapReduce(MR) task type for executing MapReduce programs. For MapReduce nodes, the worker submits the task by using the Hadoop command `hadoop jar`. See [Hadoop Command Manual](https://hadoop.apache.org/docs/r3.2.4/hadoop-project-dist/hadoop-common/CommandsManual.html#jar) for more details.
+
+## Create Task
+
+- Click Project Management-Project Name-Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
+- Drag the <img src="/img/tasks/icons/mr.png" width="15"/> from the toolbar to the drawing board.
+
+## Task Parameter
+
+- **Node name**: The node name in a workflow definition is unique.
+- **Run flag**: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- **Descriptive information**: describe the function of the node.
+- **Task priority**: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- **Worker grouping**: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- **Environment Name**: Configure the environment name in which to run the script.
+- **Number of failed retry attempts**: The number of times the task failed to be resubmitted.
+- **Failed retry interval**: The time interval, in minutes, for resubmitting the task after a failure.
+- **Delayed execution time**: The time, in minutes, that the task's execution is delayed.
+- **Timeout alarm**: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- **Resource**: Refers to the list of resource files that need to be called in the script, and the files uploaded or created by the resource center-file management.
+- **Custom parameters**: It is a user-defined parameter that is part of MapReduce, which will replace the content with ${variable} in the script.
+- **Predecessor task**: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.
+
+### Java/Scala Program
+
+- **Program type**: select JAVA/SCALA program.
+- **The class of the main function**: is the full path of the Main Class, the entry point of the MapReduce program.
+- **Main jar package**: is the MapReduce jar package.
+- **Task name** (optional): MapReduce task name.
+- **Command line parameters**: set the input parameters of the MapReduce program and support the substitution of custom parameter variables.
+- **Other parameters**: support `-D`, `-files`, `-libjars`, `-archives` format.
+- **Resource**: If the resource file is referenced in other parameters, you need to select and specify it in the resource list.
+- **User-defined parameter**: It is a user-defined parameter of the MapReduce part, which will replace the content with \${variable} in the script.
+
+### Python Program
+
+- **Program type**: select Python language
+- **Main jar package**: is the Python jar package for running MR
+- **Other parameters**: support -D, -mapper, -reducer, -input -output format, here you can set the input of user-defined parameters, such as:
+- `-mapper "mapper.py 1" -file mapper.py -reducer reducer.py -file reducer.py -input /journey/words.txt -output /journey/out/mr/\${currentTimeMillis}` (a full command sketch follows this list)
+- The `mapper.py 1` after `-mapper` is two parameters: the first parameter is `mapper.py`, and the second parameter is `1`.
+- **Resource**: If the resource file is referenced in other parameters, you need to select and specify in the resource
+- **User-defined parameter**: It is a user-defined parameter of the MapReduce part, which will replace the content with \${variable} in the script
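+
+Assembled into a single command, the streaming invocation looks roughly like the sketch below; the streaming jar path is an assumption that depends on your Hadoop distribution, and `${currentTimeMillis}` is the DolphinScheduler parameter from the example above.
+
+```shell
+# Hedged sketch; the streaming jar location varies with the Hadoop version.
+hadoop jar $HADOOP_HOME/share/hadoop/tools/lib/hadoop-streaming-3.2.4.jar \
+  -D mapreduce.job.name=wordcount-streaming \
+  -mapper "mapper.py 1" -file mapper.py \
+  -reducer reducer.py -file reducer.py \
+  -input /journey/words.txt \
+  -output /journey/out/mr/${currentTimeMillis}
+```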
+
+## Task Example
+
+### Execute the WordCount program
+
+This example is a common introductory type of MapReduce application, which is designed to count the number of identical words in the input text.
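+
+As a point of reference, running the classic WordCount by hand with `hadoop jar` looks roughly like this; the examples jar shipped with Hadoop is used here as a stand-in for the jar you upload, and the HDFS paths are placeholders.
+
+```shell
+# The examples jar and HDFS paths below are placeholders.
+hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.4.jar \
+  wordcount /tmp/wordcount/input /tmp/wordcount/output
+```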
+
+#### Uploading the main package
+
+When using the MapReduce task node, you will need to use the Resource Center to upload the jar package for the executable. Refer to the [resource center](../resource.md).
+
+After configuring the Resource Center, you can upload the required target files directly using drag and drop.
+
+![resource_upload](/img/tasks/demo/resource_upload.png)
+
+#### Configuring MapReduce nodes
+
+Simply configure the required content according to the parameter descriptions above.
+
+![demo-mr-simple](/img/tasks/demo/mr.png)
diff --git a/docs/2.0.9/docs/en/guide/task/pigeon.md b/docs/2.0.9/docs/en/guide/task/pigeon.md
new file mode 100644
index 0000000..b50e1c1
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/pigeon.md
@@ -0,0 +1,19 @@
+# Pigeon
+
+Pigeon is a task type for tracking a general WebSocket service in DolphinScheduler. It can trigger a remote WebSocket service, check its status, and fetch its logs.
+
+## Create
+
+Drag the <img src="/img/pigeon.png" width="20"/> task node from the toolbar to the drawing board to create a new Pigeon task.
+
+## Parameter
+
+- Node name: The node name in a workflow definition is unique.
+- Run flag: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- Descriptive information: describe the function of the node.
+- Task priority: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- Worker grouping: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- Number of failed retry attempts: The number of times the task failed to be resubmitted. It supports drop-down and hand-filling.
+- Failed retry interval: The time interval for resubmitting the task after a failed task. It supports drop-down and hand-filling.
+- Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- Target task name: Pigeon websocket service name.
diff --git a/docs/2.0.9/docs/en/guide/task/python.md b/docs/2.0.9/docs/en/guide/task/python.md
new file mode 100644
index 0000000..6edb29d
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/python.md
@@ -0,0 +1,55 @@
+# Python Node
+
+## Overview
+
+Use `Python Task` to create a Python-type task and execute Python scripts. When the worker executes a `Python Task`,
+it generates a temporary Python script and executes it as the Linux user with the same name as the tenant.
+
+## Create Task
+
+- Click Project Management-Project Name-Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
+- Drag <img src="/img/tasks/icons/python.png" width="15"/> from the toolbar to the canvas.
+
+## Task Parameter
+
+- Node name: The node name in a workflow definition is unique.
+- Run flag: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- Descriptive information: Describe the function of the node.
+- Task priority: When the number of worker threads is insufficient, execute in the order of priority from high to low, and tasks with the same priority will execute in a first-in first-out order.
+- Worker grouping: Assign tasks to the machines of the worker group to execute. If `Default` is selected, randomly select a worker machine for execution.
+- Environment Name: Configure the environment name in which to run the script.
+- Number of failed retry attempts: The failure task resubmitting times. It supports drop-down and hand-filling.
+- Failed retry interval: The time interval for resubmitting the task after a failed task. It supports drop-down and hand-filling.
+- Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will send and the task execution will fail.
+- Script: Python program developed by the user.
+- Resource: Refers to the list of resource files that need to be called in the script, and the files uploaded or created by the resource center-file management.
+- Custom parameters: It is the user-defined parameters of Python, which will replace the content with \${variable} in the script.
+
+## Task Example
+
+### Simply Print
+
+This example simulates a common task that runs a simple command. It prints one line,
+"This is a demo of python task", in the log file, as shown in the following figure:
+
+![demo-python-simple](/img/tasks/demo/python.jpg)
+
+```python
+print("This is a demo of python task")
+```
+
+### Custom Parameters
+
+This example simulates a custom parameter task. We use parameters to reuse existing tasks as templates or to cope with dynamic tasks. In this case,
+we declare a custom parameter named "param_key" with the value "param_val". Then we use print to output the parameter "${param_key}" we just declared.
+After running this example, we would see "param_val" printed in the log.
+
+![demo-python-custom-param](/img/tasks/demo/python_custom_param.jpg)
+
+```python
+print("${param_key}")
+```
+
+## Notice
+
+None
diff --git a/docs/2.0.9/docs/en/guide/task/shell.md b/docs/2.0.9/docs/en/guide/task/shell.md
new file mode 100644
index 0000000..f23fc24
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/shell.md
@@ -0,0 +1,47 @@
+# Shell
+
+## Overview
+
+The Shell task is used to create a shell-type task and execute a series of shell scripts. When the worker executes it,
+a temporary shell script is generated and executed by the Linux user with the same name as the tenant.
+
+## Create Task
+
+- Click Project Management-Project Name-Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
+- Drag <img src="/img/tasks/icons/shell.png" width="15"/> from the toolbar to the drawing board.
+
+## Task Parameter
+
+- Node name: The node name in a workflow definition is unique.
+- Run flag: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- Descriptive information: describe the function of the node.
+- Task priority: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- Worker grouping: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- Environment Name: Configure the environment name in which to run the script.
+- Number of failed retry attempts: The number of times the task failed to be resubmitted. It supports drop-down and hand-filling.
+- Failed retry interval: The time interval for resubmitting the task after a failed task. It supports drop-down and hand-filling.
+- Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- Script: SHELL program developed by users.
+- Resource: Refers to the list of resource files that need to be called in the script, and the files uploaded or created by the resource center-file management.
+- Custom parameters: It is a user-defined parameter that is part of SHELL, which will replace the content with \${variable} in the script.
+
+## Task Example
+
+### Simply Print
+
+This example is a simple echo task that only prints one line in the log file, containing the content
+"This is a demo of shell task". If your task only runs one or two shell commands, you can create a task based on this example.
+
+![demo-shell-simple](/img/tasks/demo/shell.jpg)
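+
+A minimal script matching this example would be the single line below (the exact command shown in the screenshot may differ slightly):
+
+```shell
+echo "This is a demo of shell task"
+```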
+
+### Custom Parameters
+
+This example is a custom parameter task, which can reuse an existing task as a template or handle dynamic tasks. First,
+we declare a custom parameter named "param_key" with the value "param_val". Then we use the keyword "${param_key}"
+to reference the parameter we just declared. After running this example, we would see "param_val" printed in the log.
+
+![demo-shell-custom-param](/img/tasks/demo/shell_custom_param.jpg)
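+
+The corresponding one-line script is sketched below; it assumes the custom parameter `param_key` has been declared on the task as described above.
+
+```shell
+echo "${param_key}"
+```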
+
+## Notice
+
+None
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/task/spark.md b/docs/2.0.9/docs/en/guide/task/spark.md
new file mode 100644
index 0000000..9543d18
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/spark.md
@@ -0,0 +1,62 @@
+# Spark
+
+## Overview
+
+The Spark task type is used to execute Spark programs. For Spark nodes, the worker submits the task with the `spark-submit` command. See [spark-submit](https://spark.apache.org/docs/3.2.1/submitting-applications.html#launching-applications-with-spark-submit) for more details.
+
+## Create task
+
+- Click Project Management -> Project Name -> Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
+- Drag the <img src="/img/tasks/icons/spark.png" width="15"/> from the toolbar to the drawing board.
+
+## Task Parameter
+
+- **Node name**: The node name in a workflow definition is unique.
+- **Run flag**: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- **Descriptive information**: describe the function of the node.
+- **Task priority**: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- **Worker grouping**: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- **Environment Name**: Configure the environment name in which to run the script.
+- **Number of failed retry attempts**: The number of times the task failed to be resubmitted.
+- **Failed retry interval**: The time interval, in minutes, for resubmitting the task after a failure.
+- **Delayed execution time**: The time, in minutes, that the task's execution is delayed.
+- **Timeout alarm**: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- **Program type**: supports Java, Scala and Python.
+- **Spark version**: support Spark1 and Spark2.
+- **The class of main function**: is the full path of Main Class, the entry point of the Spark program.
+- **Main jar package**: is the Spark jar package.
+- **Deployment mode**: support three modes of yarn-cluster, yarn-client and local. 
+- **Task name** (optional): Spark task name.
+- **Driver cores**: used to set the number of Driver cores, which can be set according to the actual production environment.
+- **Driver memory size**: used to set the amount of Driver memory, which can be set according to the actual production environment.
+- **Number of Executors**: used to set the number of Executors, which can be set according to the actual production environment.
+- **Executor memory size**: used to set the amount of Executor memory, which can be set according to the actual production environment.
+- **Main program parameters**: set the input parameters of the Spark program and support the substitution of custom parameter variables.
+- **Other parameters**: support `--jars`, `--files`, `--archives`, `--conf` format.
+- **Resource**: Refers to the list of resource files that need to be called in the script, and the files uploaded or created by the resource center-file management.
+- **Custom parameter**: It is a local user-defined parameter of Spark, which will replace the content with ${variable} in the script.
+- **Predecessor task**: Selecting a predecessor task for the current task will set the selected predecessor task as upstream of the current task.
+
+## Task Example
+
+### Execute the WordCount program
+
+This is a common introductory case in the Big Data ecosystem, often applied to computational frameworks such as MapReduce, Flink, and Spark. The main purpose is to count the number of identical words in the input text.
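+
+For reference, a hedged sketch of the `spark-submit` command this corresponds to; the deploy mode, class, resource sizes, jar path, and input file are all placeholders to adapt to your environment.
+
+```shell
+# All values below are illustrative placeholders.
+spark-submit \
+  --master yarn \
+  --deploy-mode cluster \
+  --class org.apache.spark.examples.JavaWordCount \
+  --driver-memory 1g \
+  --executor-memory 2g \
+  --num-executors 2 \
+  $SPARK_HOME/examples/jars/spark-examples_2.12-3.2.1.jar \
+  /tmp/wordcount/input.txt
+```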
+
+#### Uploading the main package
+
+When using the Spark task node, you will need to use the Resource Center to upload the jar package for the executable. Refer to the [resource center](../resource.md).
+
+After configuring the Resource Center, you can upload the required target files directly using drag and drop.
+
+![resource_upload](/img/tasks/demo/upload_spark.png)
+
+#### Configuring Spark nodes
+
+Simply configure the required content according to the parameter descriptions above.
+
+![demo-spark-simple](/img/tasks/demo/spark_task.png)
+
+## Notice
+
+Java and Scala are only used for identification; there is no difference between them. If the Spark program is developed in Python, there is no main-function class, and the other settings are the same.
diff --git a/docs/2.0.9/docs/en/guide/task/sql.md b/docs/2.0.9/docs/en/guide/task/sql.md
new file mode 100644
index 0000000..4cbb582
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/sql.md
@@ -0,0 +1,43 @@
+# SQL
+
+## Overview
+
+SQL task, used to connect to database and execute SQL.
+
+## Create Data Source
+
+Refer to [Data Source](../datasource/introduction.md)
+
+## Create Task
+
+- Click Project Management-Project Name-Workflow Definition, and click the "Create Workflow" button to enter the DAG editing page.
+- Drag <img src="/img/tasks/icons/sql.png" width="25"/> from the toolbar to the drawing board.
+
+## Task Parameter
+
+- Data source: select the corresponding data source
+- SQL type: supports query and non-query. A query is a SELECT-type query that returns a result set; you can specify one of three templates for the email notification: table, attachment, or table attachment. Non-query statements return no result set and cover three types of operations: update, delete, and insert.
+- sql parameter: the input parameter format is key1=value1;key2=value2...
+- sql statement: SQL statement
+- UDF function: For data sources of type HIVE, you can refer to UDF functions created in the resource center. UDF functions are not supported for other types of data sources.
+- Custom parameters: The custom parameter types and data types are the same as for the stored procedure task type. The difference is that the SQL task's custom parameters replace the ${variable} placeholders in the SQL statement.
+- Pre-sql: Pre-sql is executed before the sql statement.
+- Post-sql: Post-sql is executed after the sql statement.
+
+## Task Example
+
+### Create a temporary table in hive and write data
+
+This example creates a temporary table `tmp_hello_world` in Hive and writes a row of data into it. Before creating the temporary table, we need to ensure that the table does not already exist, so we use custom parameters to obtain the current date as the suffix of the table name on each run, allowing this task to run every day. The format of the created table name is `tmp_hello_world_{yyyyMMdd}`.
+
+![hive-sql](/img/tasks/demo/hive-sql.png)
+
+### After running the task successfully, query the results in hive.
+
+Log in to the big data cluster and use the `hive` command, `beeline`, JDBC, or other methods to connect to Apache Hive and run the query. The query SQL is `select * from tmp_hello_world_{yyyyMMdd}`; please replace `{yyyyMMdd}` with the date of the running day. The query screenshot is as follows:
+
+![hive-sql](/img/tasks/demo/hive-result.png)
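+
+One hedged way to run that check with `beeline`; the HiveServer2 address and the date suffix are placeholders for your environment:
+
+```shell
+# Placeholder HiveServer2 host/port and date suffix.
+beeline -u "jdbc:hive2://hiveserver2-host:10000/default" \
+  -e "select * from tmp_hello_world_20220101;"
+```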
+
+## Notice
+
+Pay attention to the selection of SQL type. If it is an insert operation, you need to select "Non Query" type.
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/task/stored-procedure.md b/docs/2.0.9/docs/en/guide/task/stored-procedure.md
new file mode 100644
index 0000000..92bcc80
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/stored-procedure.md
@@ -0,0 +1,13 @@
+# Stored Procedure
+
+- According to the selected data source, execute the stored procedure.
+
+> Drag the ![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_PROCEDURE.png) task node from the toolbar to the drawing board, as shown in the following figure:
+
+<p align="center">
+   <img src="/img/procedure-en.png" width="80%" />
+ </p>
+
+- Data source: The data source type of the stored procedure supports MySQL and PostgreSQL; select the corresponding data source.
+- Method: the method name of the stored procedure.
+- Custom parameters: The custom parameter types of the stored procedure support IN and OUT, and the data types support nine data types: VARCHAR, INTEGER, LONG, FLOAT, DOUBLE, DATE, TIME, TIMESTAMP, and BOOLEAN.
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/task/sub-process.md b/docs/2.0.9/docs/en/guide/task/sub-process.md
new file mode 100644
index 0000000..f8ac1a5
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/sub-process.md
@@ -0,0 +1,14 @@
+# SubProcess
+
+- The sub-process node executes an external workflow definition as a task node.
+
+> Drag the ![PNG](https://analysys.github.io/easyscheduler_docs_cn/images/toolbar_SUB_PROCESS.png) task node from the toolbar to the drawing board, as shown in the following figure:
+
+<p align="center">
+   <img src="/img/sub-process-en.png" width="80%" />
+ </p>
+
+- Node name: The node name in a workflow definition is unique
+- Run flag: identify whether this node can be scheduled normally
+- Descriptive information: describe the function of the node
+- Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- Sub-node: It is the workflow definition of the selected sub-process. Enter the sub-node in the upper right corner to jump to the workflow definition of the selected sub-process
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/task/switch.md b/docs/2.0.9/docs/en/guide/task/switch.md
new file mode 100644
index 0000000..7dc71d5
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/task/switch.md
@@ -0,0 +1,37 @@
+# Switch
+
+Switch is a conditional judgment node: the branch to execute is determined by the value of a [global variable](../parameter/global.md) and the result of the expression written by the user.
+
+## Create
+
+Drag the <img src="/img/switch.png" width="20"/> in the tool bar to create task. **Note** After the switch task is created, you must configure it downstream to make parameter `Branch flow` work.
+
+## Parameter
+
+- Node name: The node name in a workflow definition is unique.
+- Run flag: Identifies whether this node can be scheduled normally, if it does not need to be executed, you can turn on the prohibition switch.
+- Descriptive information: describe the function of the node.
+- Task priority: When the number of worker threads is insufficient, they are executed in order from high to low, and when the priority is the same, they are executed according to the first-in first-out principle.
+- Worker grouping: Tasks are assigned to the machines of the worker group to execute. If Default is selected, a worker machine will be randomly selected for execution.
+- Number of failed retry attempts: The number of times the task failed to be resubmitted. It supports drop-down and hand-filling.
+- Failed retry interval: The time interval for resubmitting the task after a failed task. It supports drop-down and hand-filling.
+- Timeout alarm: Check the timeout alarm and timeout failure. When the task exceeds the "timeout period", an alarm email will be sent and the task execution will fail.
+- condition: You can configure multiple conditions for the switch task. When the conditions are true, the configured branch will be executed. You can configure multiple different conditions to satisfy different businesses.
+- Branch flow: The default branch flow, when all the conditions are false, it will execute this branch flow.
+
+## Detail
+
+Here we have four tasks with the dependencies `A -> B -> [C, D]`, where task A is a shell task and task B is a switch task.
+
+- In task A, a global variable named `id` is defined through a [global variable](../parameter/global.md); the declaration method is `${setValue(id=1)}` (see the sketch after this list).
+- Task B adds conditions and uses the global variable declared upstream for its conditional judgment (note that the global variable must exist when the switch runs, which means the switch task can use global variables that are not from its direct upstream). We want the workflow to execute task C when `id = 1` and task D otherwise:
+  - Configure task C to run when the global variable `id` equals 1: edit `${id} == 1` in the condition of task B and select `C` as the branch flow.
+  - For all other cases, select `D` as the default branch flow.
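+
+A minimal sketch of how task A could declare that variable, based on the `${setValue(...)}` syntax mentioned above (the value `1` is just the example used here):
+
+```shell
+# Task A (shell task): declare global variable `id` for the downstream switch to evaluate.
+echo "${setValue(id=1)}"
+```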
+
+Switch task configuration is as follows
+
+![task-switch-configure](/img/switch_configure.jpg)
+
+## Related Task
+
+[Condition](conditions.md): The Condition task mainly executes the corresponding branch based on the execution status (success, failure) of the upstream node, while the [Switch](switch.md) task mainly executes the corresponding branch based on the value of a [global parameter](../parameter/global.md) and the result of the judgment expression written by the user.
\ No newline at end of file
diff --git a/docs/2.0.9/docs/en/guide/upgrade.md b/docs/2.0.9/docs/en/guide/upgrade.md
new file mode 100644
index 0000000..6d42b2a
--- /dev/null
+++ b/docs/2.0.9/docs/en/guide/upgrade.md
@@ -0,0 +1,63 @@
+
+# DolphinScheduler upgrade documentation
+
+## 1. Back Up Previous Version's Files and Database.
+
+## 2. Stop All Services of DolphinScheduler.
+
+ `sh ./script/stop-all.sh`
+
+## 3. Download the New Version's Installation Package.
+
+- [Download](/en-us/download/download.html) the latest version of the installation packages.
+- The following upgrade operations need to be performed in the new version's directory.
+
+## 4. Database Upgrade
+- Modify the following properties in `conf/config/install_config.conf`.
+
+- If you use MySQL as the database for DolphinScheduler, comment out the PostgreSQL-related configurations, add the MySQL connector jar into the lib directory (here we use mysql-connector-java-8.0.16.jar, which you can download [here](https://downloads.MySQL.com/archives/c-j/)), and then configure the database connection information correctly. Alternatively, if you use PostgreSQL as the database, comment out the MySQL-related configurations and configure the database connection information correctly.
+
+```conf
+# Database type, username, password, IP, port, metadata. For now dbtype supports `mysql` and `postgresql`, `H2`
+# Please make sure that the value of configuration is quoted in double quotation marks, otherwise may not take effect
+DATABASE_TYPE="mysql"
+SPRING_DATASOURCE_URL="jdbc:mysql://ds1:3306/ds_201_doc?useUnicode=true&characterEncoding=UTF-8"
+# Have to modify if you are not using dolphinscheduler/dolphinscheduler as your username and password
+SPRING_DATASOURCE_USERNAME="dolphinscheduler"
+SPRING_DATASOURCE_PASSWORD="dolphinscheduler"
+```
+
+- Execute database upgrade script
+
+    `sh ./script/upgrade-dolphinscheduler.sh`
+
+## 5. Backend Service Upgrade.
+
+### 5.1 Modify the Content in `conf/config/install_config.conf` File.
+- For standalone deployment, please refer to [6, Modify running arguments] in [Standalone-Deployment](./installation/standalone.md).
+- For cluster deployment, please refer to [6, Modify running arguments] in [Cluster-Deployment](./installation/cluster.md).
+
+#### Notes for Master Configuration
+
+1. Modify the `workers` config item in the `conf/config/install_config.conf` file.
+
+Imagine the worker service is to be deployed on the machines below:
+
+| hostname | ip |
+| :---  | :---:  |
+| ds1   | 192.168.xx.10     |
+| ds2   | 192.168.xx.11     |
+| ds3   | 192.168.xx.12     |
+
+To keep the worker group configuration consistent with the previous version, modify the `workers` config item as below:
+
+```shell
+#worker service is deployed on which machine, and also specify which worker group this worker belongs to. 
+workers="ds1:service1,ds2:service2,ds3:service2"
+```
+
+### 5.2 Execute Deploy Script.
+```shell
+sh install.sh
+```
+
+
diff --git a/docs/2.0.9/docs/zh/About_DolphinScheduler/About_DolphinScheduler.md b/docs/2.0.9/docs/zh/About_DolphinScheduler/About_DolphinScheduler.md
new file mode 100644
index 0000000..578ce51
--- /dev/null
+++ b/docs/2.0.9/docs/zh/About_DolphinScheduler/About_DolphinScheduler.md
@@ -0,0 +1,12 @@
+# 关于DolphinScheduler
+
+Apache DolphinScheduler是一个分布式易扩展的可视化DAG工作流任务调度开源系统。解决数据研发ETL 错综复杂的依赖关系,不能直观监控任务健康状态等问题。DolphinScheduler以DAG流式的方式将Task组装起来,可实时监控任务的运行状态,同时支持重试、从指定节点恢复失败、暂停及Kill任务等操作
+
+# 简单易用
+DAG监控界面,所有流程定义都是可视化,通过拖拽任务定制DAG,通过API方式与第三方系统对接, 一键部署
+# 高可靠性
+去中心化的多Master和多Worker, 自身支持HA功能, 采用任务队列来避免过载,不会造成机器卡死
+# 丰富的使用场景
+支持暂停恢复操作.支持多租户,更好的应对大数据的使用场景. 支持更多的任务类型,如 spark, hive, mr, python, sub_process, shell
+# 高扩展性
+支持自定义任务类型,调度器使用分布式调度,调度能力随集群线性增长,Master和Worker支持动态上下线
diff --git a/docs/2.0.9/docs/zh/architecture/cache.md b/docs/2.0.9/docs/zh/architecture/cache.md
new file mode 100644
index 0000000..8b4804d
--- /dev/null
+++ b/docs/2.0.9/docs/zh/architecture/cache.md
@@ -0,0 +1,42 @@
+### 缓存
+
+#### 缓存目的
+
+由于在master-server调度过程中,会产生大量的数据库读取操作,如tenant,user,processDefinition等,一方面对DB产生很大的读压力,另一方面则会使整个核心调度流程变得缓慢;
+
+考虑到这部分业务数据是读多写少的场景,故引入了缓存模块,以减少DB读压力,加快核心调度流程;
+
+#### 缓存设置
+
+```yaml
+spring:
+  cache:
+    # default enable cache, you can disable by `type: none`
+    type: none
+    cache-names:
+      - tenant
+      - user
+      - processDefinition
+      - processTaskRelation
+      - taskDefinition
+    caffeine:
+      spec: maximumSize=100,expireAfterWrite=300s,recordStats
+```
+
+缓存模块采用[spring-cache](https://spring.io/guides/gs/caching/)机制,可直接在spring配置文件中配置是否开启缓存(默认`none`关闭), 缓存类型;
+
+目前采用[caffeine](https://github.com/ben-manes/caffeine)进行缓存管理,可自由设置缓存相关配置,如缓存大小、过期时间等;
+
+#### 缓存读取
+
+缓存采用spring-cache的注解,配置在相关的mapper层,可参考如:`TenantMapper`.
+
+#### 缓存更新
+
+业务数据的更新来自于api-server, 而缓存端在master-server, 故需要对api-server的数据更新做监听(aspect切面拦截`@CacheEvict`),当需要进行缓存驱逐时会通知master-server,master-server接收到cacheEvictCommand后进行缓存驱逐;
+
+需要注意的是:缓存更新的兜底策略来自于用户在caffeine中的过期策略配置,请结合业务进行配置;
+
+时序图如下图所示:
+
+<img src="/img/cache-evict.png" alt="cache-evict" style="zoom: 67%;" />
\ No newline at end of file
diff --git a/docs/2.0.9/docs/zh/architecture/configuration.md b/docs/2.0.9/docs/zh/architecture/configuration.md
new file mode 100644
index 0000000..e581010
--- /dev/null
+++ b/docs/2.0.9/docs/zh/architecture/configuration.md
@@ -0,0 +1,407 @@
+<!-- markdown-link-check-disable -->
+
+# 前言
+本文档为dolphinscheduler配置文件说明文档,针对版本为 dolphinscheduler-1.3.x 版本.
+
+# 目录结构
+目前dolphinscheduler 所有的配置文件都在 [conf ] 目录中.
+为了更直观的了解[conf]目录所在的位置以及包含的配置文件,请查看下面dolphinscheduler安装目录的简化说明.
+本文主要讲述dolphinscheduler的配置文件.其他部分先不做赘述.
+
+[注:以下 dolphinscheduler 简称为DS.]
+```
+
+├─bin                               DS命令存放目录
+│  ├─dolphinscheduler-daemon.sh         启动/关闭DS服务脚本
+│  ├─start-all.sh                       根据配置文件启动所有DS服务
+│  ├─stop-all.sh                        根据配置文件关闭所有DS服务
+├─conf                              配置文件目录
+│  ├─application-api.properties         api服务配置文件
+│  ├─datasource.properties              数据库配置文件
+│  ├─registry.properties               registry配置文件
+│  ├─master.properties                  master服务配置文件
+│  ├─worker.properties                  worker服务配置文件
+│  ├─quartz.properties                  quartz服务配置文件
+│  ├─common.properties                  公共服务[存储]配置文件
+│  ├─alert.properties                   alert服务配置文件
+│  ├─config                             环境变量配置文件夹
+│      ├─install_config.conf                DS环境变量配置脚本[用于DS安装/启动]
+│  ├─env                                运行脚本环境变量配置目录
+│      ├─dolphinscheduler_env.sh            运行脚本加载环境变量配置文件[如: JAVA_HOME,HADOOP_HOME, HIVE_HOME ...]
+│  ├─org                                mybatis mapper文件目录
+│  ├─i18n                               i18n配置文件目录
+│  ├─logback-api.xml                    api服务日志配置文件
+│  ├─logback-master.xml                 master服务日志配置文件
+│  ├─logback-worker.xml                 worker服务日志配置文件
+│  ├─logback-alert.xml                  alert服务日志配置文件
+├─sql                               DS的元数据创建升级sql文件
+│  ├─create                             创建SQL脚本目录
+│  ├─upgrade                            升级SQL脚本目录
+│  ├─dolphinscheduler_postgre.sql       postgre数据库初始化脚本
+│  ├─dolphinscheduler_mysql.sql         mysql数据库初始化脚本
+│  ├─soft_version                       当前DS版本标识文件
+├─script                            DS服务部署,数据库创建/升级脚本目录
+│  ├─create-dolphinscheduler.sh         DS数据库初始化脚本      
+│  ├─upgrade-dolphinscheduler.sh        DS数据库升级脚本                
+│  ├─monitor-server.sh                  DS服务监控启动脚本               
+│  ├─scp-hosts.sh                       安装文件传输脚本                                                    
+│  ├─remove-zk-node.sh                  清理zookeeper缓存文件脚本       
+├─ui                                前端WEB资源目录
+├─lib                               DS依赖的jar存放目录
+├─install.sh                        自动安装DS服务脚本
+
+
+```
+
+
+# 配置文件详解
+
+序号| 服务分类 |  配置文件|
+|--|--|--|
+1|启动/关闭DS服务脚本|dolphinscheduler-daemon.sh
+2|数据库连接配置 | datasource.properties
+3|registry连接配置|registry.properties
+4|公共[存储]配置|common.properties
+5|API服务配置|application-api.properties
+6|Master服务配置|master.properties
+7|Worker服务配置|worker.properties
+8|Alert 服务配置|alert.properties
+9|Quartz配置|quartz.properties
+10|DS环境变量配置脚本[用于DS安装/启动]|install_config.conf
+11|运行脚本加载环境变量配置文件 <br />[如: JAVA_HOME,HADOOP_HOME, HIVE_HOME ...]|dolphinscheduler_env.sh
+12|各服务日志配置文件|api服务日志配置文件 : logback-api.xml  <br /> master服务日志配置文件  : logback-master.xml    <br /> worker服务日志配置文件 : logback-worker.xml  <br /> alert服务日志配置文件 : logback-alert.xml 
+
+
+## 1.dolphinscheduler-daemon.sh [启动/关闭DS服务脚本]
+dolphinscheduler-daemon.sh脚本负责DS的启动&关闭. 
+start-all.sh/stop-all.sh最终也是通过dolphinscheduler-daemon.sh对集群进行启动/关闭操作.
+目前DS只是做了一个基本的设置,JVM参数请根据各自资源的实际情况自行设置.
+
+默认简化参数如下:
+```bash
+export DOLPHINSCHEDULER_OPTS="
+-server 
+-Xmx16g 
+-Xms1g 
+-Xss512k 
+-XX:+UseConcMarkSweepGC 
+-XX:+CMSParallelRemarkEnabled 
+-XX:+UseFastAccessorMethods 
+-XX:+UseCMSInitiatingOccupancyOnly 
+-XX:CMSInitiatingOccupancyFraction=70
+"
+```
+
+> 不建议设置"-XX:DisableExplicitGC" , DS使用Netty进行通讯,设置该参数,可能会导致内存泄漏.
+
+## 2.datasource.properties [数据库连接]
+在DS中使用Druid对数据库连接进行管理,默认简化配置如下.
+|参数 | 默认值| 描述|
+|--|--|--|
+spring.datasource.driver-class-name| |数据库驱动
+spring.datasource.url||数据库连接地址
+spring.datasource.username||数据库用户名
+spring.datasource.password||数据库密码
+spring.datasource.initialSize|5| 初始连接池数量
+spring.datasource.minIdle|5| 最小连接池数量
+spring.datasource.maxActive|5| 最大连接池数量
+spring.datasource.maxWait|60000| 最大等待时长
+spring.datasource.timeBetweenEvictionRunsMillis|60000| 连接检测周期
+spring.datasource.timeBetweenConnectErrorMillis|60000| 重试间隔
+spring.datasource.minEvictableIdleTimeMillis|300000| 连接保持空闲而不被驱逐的最小时间
+spring.datasource.validationQuery|SELECT 1|检测连接是否有效的sql
+spring.datasource.validationQueryTimeout|3| 检测连接是否有效的超时时间[seconds]
+spring.datasource.testWhileIdle|true| 申请连接的时候检测,如果空闲时间大于timeBetweenEvictionRunsMillis,执行validationQuery检测连接是否有效。
+spring.datasource.testOnBorrow|true| 申请连接时执行validationQuery检测连接是否有效
+spring.datasource.testOnReturn|false| 归还连接时执行validationQuery检测连接是否有效
+spring.datasource.defaultAutoCommit|true| 是否开启自动提交
+spring.datasource.keepAlive|true| 连接池中的minIdle数量以内的连接,空闲时间超过minEvictableIdleTimeMillis,则会执行keepAlive操作。
+spring.datasource.poolPreparedStatements|true| 开启PSCache
+spring.datasource.maxPoolPreparedStatementPerConnectionSize|20| 要启用PSCache,必须配置大于0,当大于0时,poolPreparedStatements自动触发修改为true。
+
+
+## 3.registry.properties [registry连接配置,默认使用zookeeper]
+|参数 |默认值| 描述| 
+|--|--|--|
+registry.plugin.name|zookeeper| 插件名称
+registry.servers|localhost:2181| zk集群连接信息
+registry.namespace|dolphinscheduler| DS在zookeeper存储根目录(开头不带/)
+registry.base.sleep.time.ms|60| 基本重试时间差
+registry.max.sleep.ms|300| 最大重试时间
+registry.max.retries|5| 最大重试次数
+registry.session.timeout.ms|30000| session 超时时间
+registry.connection.timeout.ms|7500| 连接超时时间
+
+
+## 4.common.properties [hadoop、s3、yarn配置]
+common.properties配置文件目前主要是配置hadoop/s3a相关的配置. 
+|参数 |默认值| 描述| 
+|--|--|--|
+data.basedir.path|/tmp/dolphinscheduler|本地工作目录,用于存放临时文件
+resource.storage.type|NONE|资源文件存储类型: HDFS,S3,NONE
+resource.upload.path|/dolphinscheduler|资源文件存储路径
+hadoop.security.authentication.startup.state|false|hadoop是否开启kerberos权限
+java.security.krb5.conf.path|/opt/krb5.conf|kerberos配置目录
+login.user.keytab.username|hdfs-mycluster@ESZ.COM|kerberos登录用户
+login.user.keytab.path|/opt/hdfs.headless.keytab|kerberos登录用户keytab
+kerberos.expire.time|2|kerberos过期时间,整数,单位为小时
+resource.view.suffixs| txt,log,sh,conf,cfg,py,java,sql,hql,xml,properties|资源中心支持的文件格式
+hdfs.root.user|hdfs|如果存储类型为HDFS,需要配置拥有对应操作权限的用户
+fs.defaultFS|hdfs://mycluster:8020|请求地址如果resource.storage.type=S3,该值类似为: s3a://dolphinscheduler. 如果resource.storage.type=HDFS, 如果 hadoop 配置了 HA,需要复制core-site.xml 和 hdfs-site.xml 文件到conf目录
+fs.s3a.endpoint||s3 endpoint地址
+fs.s3a.access.key||s3 access key
+fs.s3a.secret.key||s3 secret key
+yarn.resourcemanager.ha.rm.ids||yarn resourcemanager 地址, 如果resourcemanager开启了HA, 输入HA的IP地址(以逗号分隔),如果resourcemanager为单节点, 该值为空即可
+yarn.application.status.address|http://ds1:8088/ws/v1/cluster/apps/%s|如果resourcemanager开启了HA或者没有使用resourcemanager,保持默认值即可. 如果resourcemanager为单节点,你需要将ds1 配置为resourcemanager对应的hostname
+dolphinscheduler.env.path|env/dolphinscheduler_env.sh|运行脚本加载环境变量配置文件[如: JAVA_HOME,HADOOP_HOME, HIVE_HOME ...]
+development.state|false|是否处于开发模式
+
+
+## 5.application-api.properties [API服务配置]
+|参数 |默认值| 描述| 
+|--|--|--|
+server.port|12345|api服务通讯端口
+server.servlet.session.timeout|7200|session超时时间
+server.servlet.context-path|/dolphinscheduler |请求路径
+spring.servlet.multipart.max-file-size|1024MB|最大上传文件大小
+spring.servlet.multipart.max-request-size|1024MB|最大请求大小
+server.jetty.max-http-post-size|5000000|jetty服务最大发送请求大小
+spring.messages.encoding|UTF-8|请求编码
+spring.jackson.time-zone|GMT+8|设置时区
+spring.messages.basename|i18n/messages|i18n配置
+security.authentication.type|PASSWORD|权限校验类型
+
+
+## 6.master.properties [Master服务配置]
+|参数 |默认值| 描述| 
+|--|--|--|
+master.listen.port|5678|master监听端口
+master.exec.threads|100|master工作线程数量,用于限制并行的流程实例数量
+master.exec.task.num|20|master每个流程实例的并行任务数量
+master.dispatch.task.num|3|master每个批次的派发任务数量
+master.host.selector|LowerWeight|master host选择器,用于选择合适的worker执行任务,可选值: Random, RoundRobin, LowerWeight
+master.heartbeat.interval|10|master心跳间隔,单位为秒
+master.task.commit.retryTimes|5|任务重试次数
+master.task.commit.interval|1000|任务提交间隔,单位为毫秒
+master.max.cpuload.avg|-1|master最大cpuload均值,只有高于系统cpuload均值时,master服务才能调度任务. 默认值为-1: cpu cores * 2
+master.reserved.memory|0.3|master预留内存,只有低于系统可用内存时,master服务才能调度任务,单位为G
+
+
+## 7.worker.properties [Worker服务配置]
+|参数 |默认值| 描述| 
+|--|--|--|
+worker.listen.port|1234|worker监听端口
+worker.exec.threads|100|worker工作线程数量,用于限制并行的任务实例数量
+worker.heartbeat.interval|10|worker心跳间隔,单位为秒
+worker.max.cpuload.avg|-1|worker最大cpuload均值,只有高于系统cpuload均值时,worker服务才能被派发任务. 默认值为-1: cpu cores * 2
+worker.reserved.memory|0.3|worker预留内存,只有低于系统可用内存时,worker服务才能被派发任务,单位为G
+worker.groups|default|worker分组配置,逗号分隔,例如'worker.groups=default,test' <br> worker启动时会根据该配置自动加入对应的分组
+
+
+## 8.alert.properties [Alert 告警服务配置]
+|参数 |默认值| 描述| 
+|--|--|--|
+alert.type|EMAIL|告警类型|
+mail.protocol|SMTP| 邮件服务器协议
+mail.server.host|xxx.xxx.com|邮件服务器地址
+mail.server.port|25|邮件服务器端口
+mail.sender|xxx@xxx.com|发送人邮箱
+mail.user|xxx@xxx.com|发送人邮箱名称
+mail.passwd|111111|发送人邮箱密码
+mail.smtp.starttls.enable|true|邮箱是否开启tls
+mail.smtp.ssl.enable|false|邮箱是否开启ssl
+mail.smtp.ssl.trust|xxx.xxx.com|邮箱ssl白名单
+xls.file.path|/tmp/xls|邮箱附件临时工作目录
+||以下为企业微信配置[选填]|
+enterprise.wechat.enable|false|企业微信是否启用
+enterprise.wechat.corp.id|xxxxxxx|
+enterprise.wechat.secret|xxxxxxx|
+enterprise.wechat.agent.id|xxxxxxx|
+enterprise.wechat.users|xxxxxxx|
+enterprise.wechat.token.url|https://qyapi.weixin.qq.com/cgi-bin/gettoken?  <br /> corpid=$corpId&corpsecret=$secret|
+enterprise.wechat.push.url|https://qyapi.weixin.qq.com/cgi-bin/message/send?  <br /> access_token=$token|
+enterprise.wechat.user.send.msg||发送消息格式
+enterprise.wechat.team.send.msg||群发消息格式
+plugin.dir|/Users/xx/your/path/to/plugin/dir|插件目录
+
+
+## 9.quartz.properties [Quartz配置]
+这里面主要是quartz配置,请结合实际业务场景&资源进行配置,本文暂时不做展开.
+|参数 |默认值| 描述| 
+|--|--|--|
+org.quartz.jobStore.driverDelegateClass | org.quartz.impl.jdbcjobstore.StdJDBCDelegate
+org.quartz.jobStore.driverDelegateClass | org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
+org.quartz.scheduler.instanceName | DolphinScheduler
+org.quartz.scheduler.instanceId | AUTO
+org.quartz.scheduler.makeSchedulerThreadDaemon | true
+org.quartz.jobStore.useProperties | false
+org.quartz.threadPool.class | org.quartz.simpl.SimpleThreadPool
+org.quartz.threadPool.makeThreadsDaemons | true
+org.quartz.threadPool.threadCount | 25
+org.quartz.threadPool.threadPriority | 5
+org.quartz.jobStore.class | org.quartz.impl.jdbcjobstore.JobStoreTX
+org.quartz.jobStore.tablePrefix | QRTZ_
+org.quartz.jobStore.isClustered | true
+org.quartz.jobStore.misfireThreshold | 60000
+org.quartz.jobStore.clusterCheckinInterval | 5000
+org.quartz.jobStore.acquireTriggersWithinLock|true
+org.quartz.jobStore.dataSource | myDs
+org.quartz.dataSource.myDs.connectionProvider.class | org.apache.dolphinscheduler.service.quartz.DruidConnectionProvider
+
+
+## 10.install_config.conf [DS环境变量配置脚本[用于DS安装/启动]]
+install_config.conf这个配置文件比较繁琐,这个文件主要有两个地方会用到.
+* 1.DS集群的自动安装. 
+
+> 调用install.sh脚本会自动加载该文件中的配置.并根据该文件中的内容自动配置上述的配置文件中的内容. 
+> 比如:dolphinscheduler-daemon.sh、datasource.properties、registry.properties、common.properties、application-api.properties、master.properties、worker.properties、alert.properties、quartz.properties 等文件.
+
+
+* 2.DS集群的启动&关闭.
+>DS集群在启动&关闭的时候,会加载该配置文件中的masters,workers,alertServer,apiServers等参数,启动/关闭DS集群.
+
+文件内容如下:
+```bash
+
+# 注意: 该配置文件中如果包含特殊字符,如: `.*[]^${}\+?|()@#&`, 请转义,
+#      示例: `[` 转义为 `\[`
+
+# 数据库类型, 目前仅支持 postgresql 或者 mysql
+dbtype="mysql"
+
+# 数据库 地址 & 端口
+dbhost="192.168.xx.xx:3306"
+
+# 数据库 名称
+dbname="dolphinscheduler"
+
+
+# 数据库 用户名
+username="xx"
+
+# 数据库 密码
+password="xx"
+
+# zookeeper地址
+zkQuorum="192.168.xx.xx:2181,192.168.xx.xx:2181,192.168.xx.xx:2181"
+
+# 将DS安装到哪个目录,如: /data1_1T/dolphinscheduler,
+installPath="/data1_1T/dolphinscheduler"
+
+# 使用哪个用户部署
+# 注意: 部署用户需要sudo 权限, 并且可以操作 hdfs .
+#     如果使用hdfs的话,根目录必须使用该用户进行创建.否则会有权限相关的问题.
+deployUser="dolphinscheduler"
+
+
+# 以下为告警服务配置
+# 邮件服务器地址
+mailServerHost="smtp.exmail.qq.com"
+
+# 邮件服务器 端口
+mailServerPort="25"
+
+# 发送者
+mailSender="xxxxxxxxxx"
+
+# 发送用户
+mailUser="xxxxxxxxxx"
+
+# 邮箱密码
+mailPassword="xxxxxxxxxx"
+
+# TLS协议的邮箱设置为true,否则设置为false
+starttlsEnable="true"
+
+# 开启SSL协议的邮箱配置为true,否则为false。注意: starttlsEnable和sslEnable不能同时为true
+sslEnable="false"
+
+# 邮件服务地址值,同 mailServerHost
+sslTrust="smtp.exmail.qq.com"
+
+#业务用到的比如sql等资源文件上传到哪里,可以设置:HDFS,S3,NONE。如果想上传到HDFS,请配置为HDFS;如果不需要资源上传功能请选择NONE。
+resourceStorageType="NONE"
+
+# if S3,write S3 address,HA,for example :s3a://dolphinscheduler,
+# Note,s3 be sure to create the root directory /dolphinscheduler
+defaultFS="hdfs://mycluster:8020"
+
+# 如果resourceStorageType 为S3 需要配置的参数如下:
+s3Endpoint="http://192.168.xx.xx:9010"
+s3AccessKey="xxxxxxxxxx"
+s3SecretKey="xxxxxxxxxx"
+
+# 如果ResourceManager是HA,则配置为ResourceManager节点的主备ip或者hostname,比如"192.168.xx.xx,192.168.xx.xx",否则如果是单ResourceManager或者根本没用到yarn,请配置yarnHaIps=""即可,如果没用到yarn,配置为""
+yarnHaIps="192.168.xx.xx,192.168.xx.xx"
+
+# 如果是单ResourceManager,则配置为ResourceManager节点ip或主机名,否则保持默认值即可。
+singleYarnIp="yarnIp1"
+
+# 资源文件在 HDFS/S3  存储路径
+resourceUploadPath="/dolphinscheduler"
+
+
+# HDFS/S3  操作用户
+hdfsRootUser="hdfs"
+
+# 以下为 kerberos 配置
+
+# kerberos是否开启
+kerberosStartUp="false"
+# kdc krb5 config file path
+krb5ConfPath="$installPath/conf/krb5.conf"
+# keytab username
+keytabUserName="hdfs-mycluster@ESZ.COM"
+# username keytab path
+keytabPath="$installPath/conf/hdfs.headless.keytab"
+
+
+# api 服务端口
+apiServerPort="12345"
+
+
+# 部署DS的所有主机hostname
+ips="ds1,ds2,ds3,ds4,ds5"
+
+# ssh 端口 , 默认 22
+sshPort="22"
+
+# 部署master服务主机
+masters="ds1,ds2"
+
+# 部署 worker服务的主机
+# 注意: 每一个worker都需要设置一个worker 分组的名称,默认值为 "default"
+workers="ds1:default,ds2:default,ds3:default,ds4:default,ds5:default"
+
+#  部署alert服务主机
+alertServer="ds3"
+
+# 部署api服务主机 
+apiServers="ds1"
+```
+
+## 11.dolphinscheduler_env.sh [环境变量配置]
+通过类似shell方式提交任务的的时候,会加载该配置文件中的环境变量到主机中.
+涉及到的任务类型有: Shell任务、Python任务、Spark任务、Flink任务、Datax任务等等
+```bash
+export HADOOP_HOME=/opt/soft/hadoop
+export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop
+export SPARK_HOME1=/opt/soft/spark1
+export SPARK_HOME2=/opt/soft/spark2
+export PYTHON_HOME=/opt/soft/python
+export JAVA_HOME=/opt/soft/java
+export HIVE_HOME=/opt/soft/hive
+export FLINK_HOME=/opt/soft/flink
+export DATAX_HOME=/opt/soft/datax/bin/datax.py
+
+export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$PATH:$FLINK_HOME/bin:$DATAX_HOME:$PATH
+
+```
+
+## 12.各服务日志配置文件
+|服务名称| 日志文件名 |
+|--|--|
+api服务日志配置文件 |logback-api.xml|
+master服务日志配置文件|logback-master.xml |
+worker服务日志配置文件|logback-worker.xml |
+alert服务日志配置文件|logback-alert.xml |
diff --git a/docs/2.0.9/docs/zh/architecture/design.md b/docs/2.0.9/docs/zh/architecture/design.md
new file mode 100644
index 0000000..4418be1
--- /dev/null
+++ b/docs/2.0.9/docs/zh/architecture/design.md
@@ -0,0 +1,267 @@
+## 系统架构设计
+本章节介绍Apache DolphinScheduler调度系统架构
+
+
+### 1.系统架构
+
+#### 1.1 系统架构图
+<p align="center">
+  <img src="/img/architecture-1.3.0.jpg" alt="系统架构图"  width="70%" />
+  <p align="center">
+        <em>系统架构图</em>
+  </p>
+</p>
+
+#### 1.2 启动流程活动图
+<p align="center">
+  <img src="/img/master-process-2.0-zh_cn.png" alt="Start process activity diagram"  width="70%" />
+  <p align="center">
+        <em>启动流程活动图</em>
+  </p>
+</p>
+
+#### 1.3 架构说明
+
+* **MasterServer** 
+
+    MasterServer采用分布式无中心设计理念,MasterServer主要负责 DAG 任务切分、任务提交监控,并同时监听其它MasterServer和WorkerServer的健康状态。
+    MasterServer服务启动时向Zookeeper注册临时节点,通过监听Zookeeper临时节点变化来进行容错处理。
+    MasterServer基于netty提供监听服务。
+
+    ##### 该服务内主要包含:
+
+    - **Distributed Quartz**分布式调度组件,主要负责定时任务的启停操作,当quartz调起任务后,Master内部会有线程池具体负责处理任务的后续操作
+
+    - **MasterSchedulerService**是一个扫描线程,定时扫描数据库中的 **command** 表,生成工作流实例,根据不同的**命令类型**进行不同的业务操作
+
+    - **WorkflowExecuteThread**主要是负责DAG任务切分、任务提交、各种不同命令类型的逻辑处理,处理任务状态和工作流状态事件
+
+    - **EventExecuteService**处理master负责的工作流实例所有的状态变化事件,使用线程池处理工作流的状态事件
+    
+    - **StateWheelExecuteThread**处理依赖任务和超时任务的定时状态更新
+
+* **WorkerServer** 
+
+     WorkerServer也采用分布式无中心设计理念,支持自定义任务插件,主要负责任务的执行和提供日志服务。
+     WorkerServer服务启动时向Zookeeper注册临时节点,并维持心跳。
+     
+     ##### 该服务包含:
+     
+     - **WorkerManagerThread**主要通过netty领取master发送过来的任务,并根据不同任务类型调用**TaskExecuteThread**对应执行器。
+     
+     - **RetryReportTaskStatusThread**主要通过netty向master汇报任务状态,如果汇报失败,会一直重试汇报
+
+     - **LoggerServer**是一个日志服务,提供日志分片查看、刷新和下载等功能
+     
+* **Registry** 
+
+    注册中心,使用插件化实现,默认支持Zookeeper, 系统中的MasterServer和WorkerServer节点通过注册中心来进行集群管理和容错。另外系统还基于注册中心进行事件监听和分布式锁。
+    
+* **Alert** 
+
+    提供告警相关功能,仅支持单机服务。支持自定义告警插件。
+
+* **API** 
+
+    API接口层,主要负责处理前端UI层的请求。该服务统一提供RESTful api向外部提供请求服务。
+    接口包括工作流的创建、定义、查询、修改、发布、下线、手工启动、停止、暂停、恢复、从该节点开始执行等等。
+
+* **UI** 
+
+  系统的前端页面,提供系统的各种可视化操作界面,详见[功能介绍](../guide/homepage.md)部分。
+
+#### 1.4 架构设计思想
+
+##### 一、去中心化vs中心化 
+
+###### 中心化思想
+
+中心化的设计理念比较简单,分布式集群中的节点按照角色分工,大体上分为两种角色:
+<p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/master_slave.png" alt="master-slave角色"  width="50%" />
+ </p>
+
+- Master的角色主要负责任务分发并监督Slave的健康状态,可以动态的将任务均衡到Slave上,以致Slave节点不至于“忙死”或”闲死”的状态。
+- Worker的角色主要负责任务的执行工作并维护和Master的心跳,以便Master可以分配任务给Slave。
+
+
+
+中心化思想设计存在的问题:
+
+- 一旦Master出现了问题,则群龙无首,整个集群就会崩溃。为了解决这个问题,大多数Master/Slave架构模式都采用了主备Master的设计方案,可以是热备或者冷备,也可以是自动切换或手动切换,而且越来越多的新系统都开始具备自动选举切换Master的能力,以提升系统的可用性。
+- 另外一个问题是如果Scheduler在Master上,虽然可以支持一个DAG中不同的任务运行在不同的机器上,但是会产生Master的过负载。如果Scheduler在Slave上,则一个DAG中所有的任务都只能在某一台机器上进行作业提交,则并行任务比较多的时候,Slave的压力可能会比较大。
+
+
+
+###### 去中心化
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/decentralization.png" alt="去中心化"  width="50%" />
+ </p>
+
+- 在去中心化设计里,通常没有Master/Slave的概念,所有的角色都是一样的,地位是平等的,全球互联网就是一个典型的去中心化的分布式系统,联网的任意节点设备down机,都只会影响很小范围的功能。
+- 去中心化设计的核心设计在于整个分布式系统中不存在一个区别于其他节点的”管理者”,因此不存在单点故障问题。但由于不存在” 管理者”节点所以每个节点都需要跟其他节点通信才得到必须要的机器信息,而分布式系统通信的不可靠性,则大大增加了上述功能的实现难度。
+- 实际上,真正去中心化的分布式系统并不多见。反而动态中心化分布式系统正在不断涌出。在这种架构下,集群中的管理者是被动态选择出来的,而不是预置的,并且集群在发生故障的时候,集群的节点会自发的举行"会议"来选举新的"管理者"去主持工作。最典型的案例就是ZooKeeper及Go语言实现的Etcd。
+
+
+- DolphinScheduler的去中心化是Master/Worker注册到Zookeeper中,实现Master集群和Worker集群无中心,使用分片机制,公平分配工作流在master上执行,并通过不同的发送策略将任务发送给worker执行具体的任务
+
+#####  二、Master执行流程
+
+1. DolphinScheduler使用分片算法将command取模,根据master的排序id分配,master将拿到的command转换成工作流实例,使用线程池处理工作流实例
+
+
+2. DolphinScheduler对工作流的处理流程:
+
+  - 通过UI或者API调用,启动工作流,持久化一条command到数据库中
+  - Master通过分片算法,扫描Command表,生成工作流实例ProcessInstance,同时删除Command数据
+  - Master使用线程池运行WorkflowExecuteThread,执行工作流实例的流程,包括构建DAG,创建任务实例TaskInstance,将TaskInstance通过netty发送给worker
+  - Worker收到任务以后,修改任务状态,并将执行信息返回Master
+  - Master收到任务信息,持久化到数据库,并且将状态变化事件存入EventExecuteService事件队列
+  - EventExecuteService根据事件队列调用WorkflowExecuteThread进行后续任务的提交和工作流状态的修改
+
+
+##### 三、容错设计
+容错分为服务宕机容错和任务重试,服务宕机容错又分为Master容错和Worker容错两种情况
+
+###### 1. 宕机容错
+
+服务容错设计依赖于ZooKeeper的Watcher机制,实现原理如图:
+
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/fault-tolerant.png" alt="DolphinScheduler容错设计"  width="40%" />
+ </p>
+其中Master监控其他Master和Worker的目录,如果监听到remove事件,则会根据具体的业务逻辑进行流程实例容错或者任务实例容错。
+
+- Master容错流程:
+
+<p align="center">
+   <img src="/img/failover-master.jpg" alt="容错流程"  width="50%" />
+ </p>
+
+容错范围:从host的维度来看,Master的容错范围包括:自身host+注册中心上不存在的节点host,容错的整个过程会加锁;
+
+容错内容:Master的容错内容包括:容错工作流实例和任务实例,在容错前会比较实例的开始时间和服务节点的启动时间,在服务启动时间之后的则跳过容错;
+
+容错后处理:ZooKeeper Master容错完成之后则重新由DolphinScheduler中Scheduler线程调度,遍历 DAG 找到”正在运行”和“提交成功”的任务,对”正在运行”的任务监控其任务实例的状态,对”提交成功”的任务需要判断Task Queue中是否已经存在,如果存在则同样监控任务实例的状态,如果不存在则重新提交任务实例。
+
+
+
+- Worker容错流程:
+
+<p align="center">
+   <img src="/img/failover-worker.jpg" alt="容错流程"  width="50%" />
+ </p>
+
+容错范围:从工作流实例的维度看,每个Master只负责容错自己的工作流实例;只有在`handleDeadServer`时会加锁;
+
+容错内容:当发送Worker节点的remove事件时,Master只容错任务实例,在容错前会比较实例的开始时间和服务节点的启动时间,在服务启动时间之后的则跳过容错;
+
+容错后处理:Master Scheduler线程一旦发现任务实例为” 需要容错”状态,则接管任务并进行重新提交。
+
+注意:由于” 网络抖动”可能会使得节点短时间内失去和ZooKeeper的心跳,从而发生节点的remove事件。对于这种情况,我们使用最简单的方式,那就是节点一旦和ZooKeeper发生超时连接,则直接将Master或Worker服务停掉。
+
+###### 2.任务失败重试
+
+这里首先要区分任务失败重试、流程失败恢复、流程失败重跑的概念:
+
+- 任务失败重试是任务级别的,是调度系统自动进行的,比如一个Shell任务设置重试次数为3次,那么在Shell任务运行失败后会自己再最多尝试运行3次
+- 流程失败恢复是流程级别的,是手动进行的,恢复是从只能**从失败的节点开始执行**或**从当前节点开始执行**
+- 流程失败重跑也是流程级别的,是手动进行的,重跑是从开始节点进行
+
+
+
+接下来说正题,我们将工作流中的任务节点分了两种类型。
+
+- 一种是业务节点,这种节点都对应一个实际的脚本或者处理语句,比如Shell节点,MR节点、Spark节点、依赖节点等。
+
+- 还有一种是逻辑节点,这种节点不做实际的脚本或语句处理,只是整个流程流转的逻辑处理,比如子流程节等。
+
+所有任务都可以配置失败重试的次数,当该任务节点失败,会自动重试,直到成功或者超过配置的重试次数。
+
+如果工作流中有任务失败达到最大重试次数,工作流就会失败停止,失败的工作流可以手动进行重跑操作或者流程恢复操作
+
+
+
+##### 四、任务优先级设计
+在早期调度设计中,如果没有优先级设计,采用公平调度设计的话,会遇到先行提交的任务可能会和后继提交的任务同时完成的情况,而不能做到设置流程或者任务的优先级,因此我们对此进行了重新设计,目前我们设计如下:
+
+-  按照**不同流程实例优先级**优先于**同一个流程实例优先级**优先于**同一流程内任务优先级**优先于**同一流程内任务**提交顺序依次从高到低进行任务处理。
+    - 具体实现是根据任务实例的json解析优先级,然后把**流程实例优先级_流程实例id_任务优先级_任务id**信息保存在ZooKeeper任务队列中,当从任务队列获取的时候,通过字符串比较即可得出最需要优先执行的任务
+
+        - 其中流程定义的优先级是考虑到有些流程需要先于其他流程进行处理,这个可以在流程启动或者定时启动时配置,共有5级,依次为HIGHEST、HIGH、MEDIUM、LOW、LOWEST。如下图
+            <p align="center">
+               <img src="https://analysys.github.io/easyscheduler_docs_cn/images/process_priority.png" alt="流程优先级配置"  width="40%" />
+             </p>
+
+        - 任务的优先级也分为5级,依次为HIGHEST、HIGH、MEDIUM、LOW、LOWEST。如下图
+            <p align="center">
+               <img src="https://analysys.github.io/easyscheduler_docs_cn/images/task_priority.png" alt="任务优先级配置"  width="35%" />
+             </p>
+
+
+##### 五、Logback和netty实现日志访问
+
+-  由于Web(UI)和Worker不一定在同一台机器上,所以查看日志不能像查询本地文件那样。有两种方案:
+  -  将日志放到ES搜索引擎上
+  -  通过netty通信获取远程日志信息
+
+-  介于考虑到尽可能的DolphinScheduler的轻量级性,所以选择了gRPC实现远程访问日志信息。
+
+ <p align="center">
+   <img src="https://analysys.github.io/easyscheduler_docs_cn/images/grpc.png" alt="grpc远程访问"  width="50%" />
+ </p>
+
+
+- 我们使用自定义Logback的FileAppender和Filter功能,实现每个任务实例生成一个日志文件。
+- FileAppender主要实现如下:
+
+ ```java
+/**
+ * task log appender
+ */
+public class TaskLogAppender extends FileAppender<ILoggingEvent> {
+
+    ...
+
+    @Override
+    protected void append(ILoggingEvent event) {
+
+        if (currentlyActiveFile == null){
+            currentlyActiveFile = getFile();
+        }
+        String activeFile = currentlyActiveFile;
+        // thread name: taskThreadName-processDefineId_processInstanceId_taskInstanceId
+        String threadName = event.getThreadName();
+        String[] threadNameArr = threadName.split("-");
+        // logId = processDefineId_processInstanceId_taskInstanceId
+        // the logId parsed from the thread name decides which per-task log file the event is written to
+        String logId = threadNameArr[1];
+        ...
+        super.subAppend(event);
+    }
+}
+ ```
+
+
+Log files are generated as /processDefinitionId/processInstanceId/taskInstanceId.log
+
+- The filter matches thread names that start with TaskLogInfo:
+
+- TaskLogFilter is implemented as follows:
+
+ ```java
+/**
+ * task log filter
+ */
+public class TaskLogFilter extends Filter<ILoggingEvent> {
+
+    @Override
+    public FilterReply decide(ILoggingEvent event) {
+        // only events logged from task threads (name prefix "TaskLogInfo-") go to the task log file
+        if (event.getThreadName().startsWith("TaskLogInfo-")){
+            return FilterReply.ACCEPT;
+        }
+        return FilterReply.DENY;
+    }
+}
+ ```
+
+
diff --git a/docs/2.0.9/docs/zh/architecture/designplus.md b/docs/2.0.9/docs/zh/architecture/designplus.md
new file mode 100644
index 0000000..bcd28ac
--- /dev/null
+++ b/docs/2.0.9/docs/zh/architecture/designplus.md
@@ -0,0 +1,58 @@
+## Glossary
+
+Before diving into Apache DolphinScheduler, let's first get familiar with the terms commonly used in scheduling systems
+
+### 1. Glossary
+
+**DAG:** short for Directed Acyclic Graph. The tasks of a workflow are assembled as a directed acyclic graph, which is traversed topologically from the nodes with an in-degree of zero until there are no successor nodes left (a minimal traversal sketch follows the example figure). Example:
+
+<p align="center">
+  <img src="/img/dag_examples_cn.jpg" alt="DAG example"  width="60%" />
+  <p align="center">
+        <em>DAG example</em>
+  </p>
+</p>
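+
+The following is a minimal sketch of the traversal described above, using a Kahn-style walk over a small hard-coded adjacency list. It only illustrates "start from the zero in-degree nodes and keep going until there are no successors"; the scheduler's real DAG classes are more elaborate.
+
+```java
+import java.util.ArrayDeque;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+
+// Topological traversal over an example DAG: A -> B, A -> C, B -> D, C -> D
+public class DagTraversalSketch {
+    public static void main(String[] args) {
+        Map<String, List<String>> successors = Map.of(
+                "A", List.of("B", "C"),
+                "B", List.of("D"),
+                "C", List.of("D"),
+                "D", List.of());
+
+        // compute the in-degree of every node
+        Map<String, Integer> inDegree = new HashMap<>();
+        successors.keySet().forEach(n -> inDegree.put(n, 0));
+        successors.values().forEach(succ -> succ.forEach(n -> inDegree.merge(n, 1, Integer::sum)));
+
+        // start from all nodes whose in-degree is zero
+        Queue<String> ready = new ArrayDeque<>();
+        inDegree.forEach((node, degree) -> { if (degree == 0) ready.add(node); });
+
+        // walk the graph until no successors are left
+        while (!ready.isEmpty()) {
+            String node = ready.poll();
+            System.out.println("run task " + node);
+            for (String next : successors.get(node)) {
+                if (inDegree.merge(next, -1, Integer::sum) == 0) {
+                    ready.add(next);
+                }
+            }
+        }
+    }
+}
+```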
+
+**Process definition**: the visual **DAG** formed by dragging task nodes onto the canvas and connecting them
+
+**Process instance**: a process instance is an instantiation of a process definition, created either by a manual start or by scheduled triggering; every run of a process definition produces one process instance
+
+**Task instance**: a task instance is an instantiation of a task node in a process definition; it identifies the execution state of a specific task
+
+**Task types**: currently SHELL, SQL, SUB_PROCESS (sub-process), PROCEDURE, MR, SPARK, PYTHON and DEPENDENT (dependency) are supported, and dynamic plugin extension is planned. Note: a **SUB_PROCESS**
+is itself a separate process definition and can be started and executed on its own
+
+**Scheduling modes**: the system supports cron-based scheduled triggering and manual triggering. Supported command types: start workflow, start execution from the current node, recover a fault-tolerant workflow, resume a paused process, start execution from the failed node, complement (backfill) data, schedule, rerun, pause, stop, recover a waiting thread.
+Of these, **recover a fault-tolerant workflow** and **recover a waiting thread** are used internally by the scheduler and cannot be invoked from outside
+
+**Scheduled triggering**: the system uses the **quartz** distributed scheduler and supports visual generation of cron expressions
+
+**Dependency**: besides the simple predecessor/successor dependencies of the **DAG**, the system also provides a **task dependency** node, supporting **custom task dependencies across processes**
+
+**Priority**: process instances and task instances support priorities; if no priority is set, the default is first-in-first-out
+
+**Email alerts**: support sending **SQL task** query results by email, as well as email alerts for process instance results and fault-tolerance notifications
+
+**Failure strategy**: for tasks running in parallel, two strategies are available when a task fails. **Continue** means the remaining parallel tasks keep running regardless of the failed task until the process ends as failed; **End** means that as soon as a failed task is found, the running parallel tasks are killed and the process ends as failed
+
+**Complement (backfill)**: backfilling historical data, with support for **parallel and serial** backfill over a date interval
+
+### 2. Modules
+
+- dolphinscheduler-alert alerting module, provides the AlertServer service.
+
+- dolphinscheduler-api web application module, provides the ApiServer service.
+
+- dolphinscheduler-common common constants, enums, utility classes, data structures and base classes
+
+- dolphinscheduler-dao provides database access and related operations.
+
+- dolphinscheduler-remote netty-based client and server
+
+- dolphinscheduler-server MasterServer and WorkerServer services
+
+- dolphinscheduler-service service module, containing the Quartz, Zookeeper and log client access services used by the server and api modules
+
+- dolphinscheduler-ui front-end module
+
+
diff --git a/docs/2.0.9/docs/zh/architecture/load-balance.md b/docs/2.0.9/docs/zh/architecture/load-balance.md
new file mode 100644
index 0000000..cb381f3
--- /dev/null
+++ b/docs/2.0.9/docs/zh/architecture/load-balance.md
@@ -0,0 +1,58 @@
+### Load Balancing
+Load balancing distributes load across servers (usually in a cluster) through a routing algorithm, so that server capacity is used as effectively as possible.
+
+### DolphinScheduler-Worker Load-Balancing Algorithms
+
+When DolphinScheduler-Master dispatches tasks to workers, three algorithms are provided by default:
+
+Weighted random (random)
+
+Smooth round-robin (roundrobin)
+
+Linear load (lowerweight)
+
+The default configuration is the linear weighted load algorithm.
+
+Since routing is done on the client side, i.e. in the master service, you can change master.host.selector in master.properties to pick the algorithm you want.
+
+e.g. master.host.selector=random (case-insensitive)
+
+### Worker Load-Balancing Configuration
+
+The configuration file is worker.properties
+
+#### Weight
+
+All of the load-balancing algorithms above distribute tasks according to weight, so the weight affects how traffic is split. You can give different machines different weights by changing the value of worker.weight in worker.properties.
+
+#### Warm-up
+
+With JIT optimization in mind, we let a worker run at reduced capacity for a while after startup so that it gradually reaches its best state; we call this period warm-up. If you are interested, you can read up on how the JIT works.
+
+So after a worker starts, its weight grows over time until it reaches its maximum (10 minutes by default; there is no configuration option for this, but you are welcome to change it and submit a PR if you need one).
+
+### Load-Balancing Algorithms in Detail
+
+#### Random (weighted)
+
+This algorithm is straightforward: pick one of the eligible workers at random, with the weight affecting how likely each worker is to be chosen.
+
+#### Smooth round-robin (weighted)
+
+Plain weighted round-robin has an obvious flaw: with certain weight combinations it produces an uneven sequence of picks, and this unsmooth load can cause momentary spikes on some instances and put the system at risk of going down. To fix this scheduling flaw we provide a smooth weighted round-robin algorithm, sketched below.
+
+Each worker has two weights: weight (fixed once warm-up is complete) and current_weight (changes dynamically). On every routing round we iterate over all workers, add weight to each worker's current_weight, and sum all weights into total_weight; we then pick the worker with the largest current_weight to execute the task and subtract total_weight from that worker's current_weight.
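+
+A minimal, self-contained sketch of the selection step just described. The `Host` class and its fields are named for this example only; DolphinScheduler's actual selector implementation differs in structure but follows the same arithmetic.
+
+```java
+import java.util.List;
+
+// Smooth weighted round-robin: every round, current_weight += weight for all hosts,
+// pick the host with the largest current_weight, then subtract total_weight from it.
+public class SmoothRoundRobinSketch {
+
+    static class Host {
+        final String name;
+        final int weight;          // fixed after warm-up
+        int currentWeight = 0;     // changes on every round
+
+        Host(String name, int weight) { this.name = name; this.weight = weight; }
+    }
+
+    static Host select(List<Host> hosts) {
+        int totalWeight = 0;
+        Host best = null;
+        for (Host h : hosts) {
+            h.currentWeight += h.weight;
+            totalWeight += h.weight;
+            if (best == null || h.currentWeight > best.currentWeight) {
+                best = h;
+            }
+        }
+        best.currentWeight -= totalWeight;
+        return best;
+    }
+
+    public static void main(String[] args) {
+        List<Host> hosts = List.of(new Host("worker-1", 5), new Host("worker-2", 1), new Host("worker-3", 1));
+        for (int i = 0; i < 7; i++) {
+            System.out.print(select(hosts).name + " ");
+        }
+        System.out.println();
+    }
+}
+```
+
+With weights 5:1:1 this prints an interleaved sequence (worker-1 worker-1 worker-2 worker-1 worker-3 worker-1 worker-1) rather than sending five tasks to worker-1 in a row, which is exactly the "smoothing" the algorithm is meant to provide.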
+
+#### Linear weighted load (default algorithm)
+
+With this algorithm, every worker reports its load information to the registry at regular intervals. Two metrics are used for the decision
+
+* load average (default threshold: number of CPU cores * 2)
+* available physical memory (default threshold: 0.3, in GB)
+
+If either check fails (load average above the limit, or available memory below the reserved amount), the worker does not take part in load balancing, i.e. no traffic is dispatched to it; a sketch of this eligibility check follows the property list below.
+
+You can customize this behaviour by changing the following properties in worker.properties
+
+* worker.max.cpuload.avg=-1 (the worker's maximum cpu load average; the worker is only eligible for task dispatch when the system cpu load average is below this value. Default -1: cpu cores * 2)
+* worker.reserved.memory=0.3 (the worker's reserved memory, in GB; the worker is only eligible for task dispatch when the system's available memory is above this value)
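+
+A minimal sketch of the eligibility check implied by the two properties above. The way -1 is resolved to "cpu cores * 2" follows the property description; the method name and how the metrics would be sampled are assumptions for this illustration.
+
+```java
+// Illustrative worker-eligibility check based on worker.max.cpuload.avg and worker.reserved.memory.
+public class WorkerEligibilitySketch {
+
+    static boolean canAcceptTasks(double systemLoadAvg, double availableMemoryGb,
+                                  double maxCpuLoadAvg, double reservedMemoryGb) {
+        if (maxCpuLoadAvg < 0) {
+            // default -1 means: number of cpu cores * 2
+            maxCpuLoadAvg = Runtime.getRuntime().availableProcessors() * 2.0;
+        }
+        boolean loadOk = systemLoadAvg < maxCpuLoadAvg;          // load must stay below the limit
+        boolean memoryOk = availableMemoryGb > reservedMemoryGb; // available memory must stay above the reserve
+        return loadOk && memoryOk;
+    }
+
+    public static void main(String[] args) {
+        // with worker.max.cpuload.avg=-1 and worker.reserved.memory=0.3
+        System.out.println(canAcceptTasks(1.5, 4.0, -1, 0.3));  // true: low load, plenty of memory
+        System.out.println(canAcceptTasks(1.5, 0.2, -1, 0.3));  // false: available memory below the reserve
+    }
+}
+```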
diff --git a/docs/2.0.9/docs/zh/architecture/metadata.md b/docs/2.0.9/docs/zh/architecture/metadata.md
new file mode 100644
index 0000000..6cda094
--- /dev/null
+++ b/docs/2.0.9/docs/zh/architecture/metadata.md
@@ -0,0 +1,185 @@
+# Dolphin Scheduler 2.0 Metadata Documentation
+
+<a name="25Ald"></a>
+### Table Overview
+| Table | Description |
+| :---: | :---: |
+| t_ds_access_token | tokens for accessing the DS backend |
+| t_ds_alert | alert information |
+| t_ds_alertgroup | alert groups |
+| t_ds_command | commands to execute |
+| t_ds_datasource | data sources |
+| t_ds_error_command | failed commands |
+| t_ds_process_definition | process definitions |
+| t_ds_process_instance | process instances |
+| t_ds_project | projects |
+| t_ds_queue | queues |
+| t_ds_relation_datasource_user | user-datasource relation |
+| t_ds_relation_process_instance | sub-processes |
+| t_ds_relation_project_user | user-project relation |
+| t_ds_relation_resources_user | user-resource relation |
+| t_ds_relation_udfs_user | user-UDF relation |
+| t_ds_relation_user_alertgroup | user-alertgroup relation |
+| t_ds_resources | resource files |
+| t_ds_schedules | process scheduling |
+| t_ds_session | user login sessions |
+| t_ds_task_instance | task instances |
+| t_ds_tenant | tenants |
+| t_ds_udfs | UDF resources |
+| t_ds_user | users |
+| t_ds_version | DS version information |
+
+<a name="VNVGr"></a>
+### User / Queue / DataSource
+![image.png](/img/metadata-erd/user-queue-datasource.png)
+
+- A tenant can have multiple users<br />
+- The queue field of t_ds_user stores the queue_name from the queue table, while t_ds_tenant stores queue_id. During process definition execution the user's queue takes precedence; if the user's queue is empty, the tenant's queue is used<br />
+- The user_id field in t_ds_datasource identifies the user who created the data source; user_id in t_ds_relation_datasource_user identifies the users who have permission on the data source<br />
+<a name="HHyGV"></a>
+### Project / Resource / Alert
+![image.png](/img/metadata-erd/project-resource-alert.png)
+
+- A user can have multiple projects; project authorization binds project_id and user_id through the t_ds_relation_project_user table<br />
+- user_id in t_ds_project identifies the user who created the project; user_id in t_ds_relation_project_user identifies the users who have permission on the project<br />
+- user_id in t_ds_resources identifies the user who created the resource; user_id in t_ds_relation_resources_user identifies the users who have permission on the resource<br />
+- user_id in t_ds_udfs identifies the user who created the UDF; user_id in t_ds_relation_udfs_user identifies the users who have permission on the UDF<br />
+<a name="Bg2Sn"></a>
+### Command / Process / Task
+![image.png](/img/metadata-erd/command.png)<br />![image.png](/img/metadata-erd/process-task.png)
+
+- A project has multiple process definitions, a process definition can produce multiple process instances, and a process instance can produce multiple task instances<br />
+- The t_ds_schedules table stores the scheduled-trigger information of process definitions<br />
+- The data in t_ds_relation_process_instance handles process definitions that contain sub-processes: parent_process_instance_id is the id of the main process instance that contains the sub-process, process_instance_id is the id of the sub-process instance, and parent_task_instance_id is the task instance id of the sub-process node; the process instance table and the task instance table correspond to t_ds_process_instance and t_ds_task_instance respectively
+<a name="Pv25P"></a>
+### Core Table Schemas
+<a name="32Jzd"></a>
+#### t_ds_process_definition
+| Field | Type | Comment |
+| --- | --- | --- |
+| id | int | primary key |
+| name | varchar | process definition name |
+| version | int | process definition version |
+| release_state | tinyint | release state of the process definition: 0 not online, 1 online |
+| project_id | int | project id |
+| user_id | int | id of the user who owns the process definition |
+| process_definition_json | longtext | process definition JSON |
+| description | text | process definition description |
+| global_params | text | global parameters |
+| flag | tinyint | whether the process is available: 0 unavailable, 1 available |
+| locations | text | node coordinate information |
+| connects | text | node connection information |
+| receivers | text | recipients |
+| receivers_cc | text | CC recipients |
+| create_time | datetime | creation time |
+| timeout | int | timeout |
+| tenant_id | int | tenant id |
+| update_time | datetime | update time |
+| modify_by | varchar | user who modified it |
+| resource_ids | varchar | resource id set |
+
+<a name="e6jfz"></a>
+#### t_ds_process_instance
+| Field | Type | Comment |
+| --- | --- | --- |
+| id | int | primary key |
+| name | varchar | process instance name |
+| process_definition_id | int | process definition id |
+| state | tinyint | process instance state: 0 submitted successfully, 1 running, 2 preparing to pause, 3 paused, 4 preparing to stop, 5 stopped, 6 failed, 7 succeeded, 8 needs fault tolerance, 9 killed, 10 waiting for thread, 11 waiting for dependency to complete |
+| recovery | tinyint | process instance fault-tolerance flag: 0 normal, 1 needs to be restarted by fault tolerance |
+| start_time | datetime | process instance start time |
+| end_time | datetime | process instance end time |
+| run_times | int | number of process instance runs |
+| host | varchar | machine the process instance runs on |
+| command_type | tinyint | command type: 0 start workflow, 1 start execution from current node, 2 recover fault-tolerant workflow, 3 resume paused process, 4 start execution from failed node, 5 complement data, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 recover waiting thread |
+| command_param | text | command parameters (JSON) |
+| task_depend_type | tinyint | node dependency type: 0 current node only, 1 run forward, 2 run backward |
+| max_try_times | tinyint | maximum retry times |
+| failure_strategy | tinyint | failure strategy: 0 end on failure, 1 continue on failure |
+| warning_type | tinyint | alert type: 0 none, 1 on process success, 2 on process failure, 3 on both success and failure |
+| warning_group_id | int | alert group id |
+| schedule_time | datetime | expected run time |
+| command_start_time | datetime | command start time |
+| global_params | text | global parameters (parameters fixed from the process definition) |
+| process_instance_json | longtext | process instance JSON (a copy of the process definition JSON) |
+| flag | tinyint | whether available: 1 available, 0 unavailable |
+| update_time | timestamp | update time |
+| is_sub_process | int | whether it is a sub-workflow: 1 yes, 0 no |
+| executor_id | int | user who executed the command |
+| locations | text | node coordinate information |
+| connects | text | node connection information |
+| history_cmd | text | command history, records all operations on the process instance |
+| dependence_schedule_times | text | estimated times of dependent nodes |
+| process_instance_priority | int | process instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest |
+| worker_group | varchar | worker group the task is assigned to run in |
+| timeout | int | timeout |
+| tenant_id | int | tenant id |
+
+<a name="IvHEc"></a>
+#### t_ds_task_instance
+| Field | Type | Comment |
+| --- | --- | --- |
+| id | int | primary key |
+| name | varchar | task name |
+| task_type | varchar | task type |
+| process_definition_id | int | process definition id |
+| process_instance_id | int | process instance id |
+| task_json | longtext | task node JSON |
+| state | tinyint | task instance state: 0 submitted successfully, 1 running, 2 preparing to pause, 3 paused, 4 preparing to stop, 5 stopped, 6 failed, 7 succeeded, 8 needs fault tolerance, 9 killed, 10 waiting for thread, 11 waiting for dependency to complete |
+| submit_time | datetime | task submission time |
+| start_time | datetime | task start time |
+| end_time | datetime | task end time |
+| host | varchar | machine that executes the task |
+| execute_path | varchar | task execution path |
+| log_path | varchar | task log path |
+| alert_flag | tinyint | whether to alert |
+| retry_times | int | retry count |
+| pid | int | process pid |
+| app_link | varchar | yarn app id |
+| flag | tinyint | whether available: 0 unavailable, 1 available |
+| retry_interval | int | retry interval |
+| max_retry_times | int | maximum retry times |
+| task_instance_priority | int | task instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest |
+| worker_group | varchar | worker group the task is assigned to run in |
+
+<a name="pPQkU"></a>
+#### t_ds_schedules
+| Field | Type | Comment |
+| --- | --- | --- |
+| id | int | primary key |
+| process_definition_id | int | process definition id |
+| start_time | datetime | schedule start time |
+| end_time | datetime | schedule end time |
+| crontab | varchar | crontab expression |
+| failure_strategy | tinyint | failure strategy: 0 end, 1 continue |
+| user_id | int | user id |
+| release_state | tinyint | state: 0 not online, 1 online |
+| warning_type | tinyint | alert type: 0 none, 1 on process success, 2 on process failure, 3 on both success and failure |
+| warning_group_id | int | alert group id |
+| process_instance_priority | int | process instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest |
+| worker_group | varchar | worker group the task is assigned to run in |
+| create_time | datetime | creation time |
+| update_time | datetime | update time |
+
+<a name="TkQzn"></a>
+#### t_ds_command
+| Field | Type | Comment |
+| --- | --- | --- |
+| id | int | primary key |
+| command_type | tinyint | command type: 0 start workflow, 1 start execution from current node, 2 recover fault-tolerant workflow, 3 resume paused process, 4 start execution from failed node, 5 complement data, 6 schedule, 7 rerun, 8 pause, 9 stop, 10 recover waiting thread |
+| process_definition_id | int | process definition id |
+| command_param | text | command parameters (JSON) |
+| task_depend_type | tinyint | node dependency type: 0 current node only, 1 run forward, 2 run backward |
+| failure_strategy | tinyint | failure strategy: 0 end, 1 continue |
+| warning_type | tinyint | alert type: 0 none, 1 on process success, 2 on process failure, 3 on both success and failure |
+| warning_group_id | int | alert group id |
+| schedule_time | datetime | expected run time |
+| start_time | datetime | start time |
+| executor_id | int | id of the executing user |
+| dependence | varchar | dependency field |
+| update_time | datetime | update time |
+| process_instance_priority | int | process instance priority: 0 Highest, 1 High, 2 Medium, 3 Low, 4 Lowest |
+| worker_group | varchar | worker group the task is assigned to run in |
+
+
+
diff --git a/docs/2.0.9/docs/zh/architecture/task-structure.md b/docs/2.0.9/docs/zh/architecture/task-structure.md
new file mode 100644
index 0000000..f369116
--- /dev/null
+++ b/docs/2.0.9/docs/zh/architecture/task-structure.md
@@ -0,0 +1,1134 @@
+
+# Overall Task Storage Structure
+All tasks created in dolphinscheduler are stored in the t_ds_process_definition table.
+
+The structure of this database table is shown below:
+
+
+No. | Field  | Type  |  Description
+-------- | ---------| -------- | ---------
+1|id|int(11)|primary key
+2|name|varchar(255)|process definition name
+3|version|int(11)|process definition version
+4|release_state|tinyint(4)|release state of the process definition: 0 not online, 1 online
+5|project_id|int(11)|project id
+6|user_id|int(11)|id of the user who owns the process definition
+7|process_definition_json|longtext|process definition JSON
+8|description|text|process definition description
+9|global_params|text|global parameters
+10|flag|tinyint(4)|whether the process is available: 0 unavailable, 1 available
+11|locations|text|node coordinate information
+12|connects|text|node connection information
+13|receivers|text|recipients
+14|receivers_cc|text|CC recipients
+15|create_time|datetime|creation time
+16|timeout|int(11) |timeout
+17|tenant_id|int(11) |tenant id
+18|update_time|datetime|update time
+19|modify_by|varchar(36)|user who modified it
+20|resource_ids|varchar(255)|resource ids
+
+The core field is process_definition_json, which defines the task information of the DAG and is stored as JSON.
+
+The common data structure is shown in the table below.
+
+No. | Field  | Type  |  Description
+-------- | ---------| -------- | ---------
+1|globalParams|Array|global parameters
+2|tasks|Array|the set of tasks in the process  [ see the following sections for the structure of each type ]
+3|tenantId|int|tenant id
+4|timeout|int|timeout
+
+Example data:
+```bash
+{
+    "globalParams":[
+        {
+            "prop":"golbal_bizdate",
+            "direct":"IN",
+            "type":"VARCHAR",
+            "value":"${system.biz.date}"
+        }
+    ],
+    "tasks":Array[1],
+    "tenantId":0,
+    "timeout":0
+}
+```
+
+# Detailed Storage Structure per Task Type
+
+## Shell Node
+**Node data structure:**
+
+No.|Parameter||Type|Description |Notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String| task code|
+2|type ||String |type |SHELL
+3| name| |String|name |
+4| params| |Object| custom parameters |JSON format
+5| |rawScript |String| Shell script |
+6| | localParams| Array|custom parameters||
+7| | resourceList| Array|resource files||
+8|description | |String|description | |
+9|runFlag | |String |run flag| |
+10|conditionResult | |Object|condition branch | |
+11| | successNode| Array|node to jump to on success| |
+12| | failedNode|Array|node to jump to on failure |
+13| dependence| |Object |task dependency |mutually exclusive with params
+14|maxRetryTimes | |String|maximum retry times | |
+15|retryInterval | |String |retry interval| |
+16|timeout | |Object|timeout control | |
+17| taskInstancePriority| |String|task priority | |
+18|workerGroup | |String |Worker group| |
+19|preTasks | |Array|preceding tasks | |
+
+
+**Node data example:**
+
+```bash
+{
+    "type":"SHELL",
+    "id":"tasks-80760",
+    "name":"Shell Task",
+    "params":{
+        "resourceList":[
+            {
+                "id":3,
+                "name":"run.sh",
+                "res":"run.sh"
+            }
+        ],
+        "localParams":[
+
+        ],
+        "rawScript":"echo "This is a shell script""
+    },
+    "description":"",
+    "runFlag":"NORMAL",
+    "conditionResult":{
+        "successNode":[
+            ""
+        ],
+        "failedNode":[
+            ""
+        ]
+    },
+    "dependence":{
+
+    },
+    "maxRetryTimes":"0",
+    "retryInterval":"1",
+    "timeout":{
+        "strategy":"",
+        "interval":null,
+        "enable":false
+    },
+    "taskInstancePriority":"MEDIUM",
+    "workerGroup":"default",
+    "preTasks":[
+
+    ]
+}
+
+```
+
+
+## SQL Node
+Query or update data in the specified data source via SQL.
+
+**Node data structure:**
+
+No.|Parameter||Type|Description |Notes
+-------- | ---------| ---------| -------- | --------- | ---------
+1|id | |String| task code|
+2|type ||String |type |SQL
+3| name| |String|name |
+4| params| |Object| custom parameters |JSON format
+5| |type |String | database type
+6| |datasource |Int | datasource id
+7| |sql |String | SQL query statement
+8| |udfs