<!--
~ Licensed to the Apache Software Foundation (ASF) under one or more
~ contributor license agreements. See the NOTICE file distributed with
~ this work for additional information regarding copyright ownership.
~ The ASF licenses this file to You under the Apache License, Version 2.0
~ (the "License"); you may not use this file except in compliance with
~ the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->
<configuration>
<property>
<name>data.basedir.path</name>
<value>/tmp/dolphinscheduler</value>
<description>
User data local directory path. Please make sure the directory exists and has read/write permissions.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>resource.storage.type</name>
<display-name>Choose Resource Upload Startup Type</display-name>
<description>
Resource storage type: HDFS, S3, or NONE.
</description>
<value>NONE</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>HDFS</value>
<label>HDFS</label>
</entry>
<entry>
<value>S3</value>
<label>S3</label>
</entry>
<entry>
<value>NONE</value>
<label>NONE</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<on-ambari-upgrade add="true"/>
</property>
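<!--
Illustrative note: this choice gates the storage properties below. With HDFS, fs.defaultFS
and hdfs.root.user apply; with S3, the fs.s3a.* properties apply; with NONE, resource
upload is disabled.
-->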
<property>
<name>resource.upload.path</name>
<value>/dolphinscheduler</value>
<description>
Base path for resource storage on HDFS/S3. Resource files will be stored under this path. Please make sure the directory exists on HDFS/S3 and has read/write permissions; "/dolphinscheduler" is recommended.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>hadoop.security.authentication.startup.state</name>
<value>false</value>
<value-attributes>
<type>value-list</type>
<entries>
<entry>
<value>true</value>
<label>Enabled</label>
</entry>
<entry>
<value>false</value>
<label>Disabled</label>
</entry>
</entries>
<selection-cardinality>1</selection-cardinality>
</value-attributes>
<description>
Whether to enable Kerberos authentication on startup; an example setup is sketched in the comment after the Kerberos properties below.
</description>
</property>
<property>
<name>java.security.krb5.conf.path</name>
<value>/opt/krb5.conf</value>
<description>
Path to the java.security.krb5.conf file.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>login.user.keytab.username</name>
<value>hdfs-mycluster@ESZ.COM</value>
<description>
Keytab username (principal) for the login user.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>login.user.keytab.path</name>
<value>/opt/hdfs.headless.keytab</value>
<description>
Keytab file path for the login user.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>kerberos.expire.time</name>
<value>2</value>
<description>
Kerberos ticket expiration time, in hours.
</description>
</property>
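<!--
Example of a consistent Kerberos setup (hypothetical realm and paths):
hadoop.security.authentication.startup.state = true
java.security.krb5.conf.path = /etc/krb5.conf
login.user.keytab.username = hdfs-mycluster@EXAMPLE.COM
login.user.keytab.path = /etc/security/keytabs/hdfs.headless.keytab
kerberos.expire.time = 2 (relogin roughly every 2 hours)
-->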
<property>
<name>resource.view.suffixs</name>
<value>txt,log,sh,bat,conf,cfg,py,java,sql,xml,hql,properties,json,yml,yaml,ini,js</value>
<description>
File suffixes viewable in the resource center (comma-separated list).
</description>
</property>
<property>
<name>hdfs.root.user</name>
<value>hdfs</value>
<description>
If resource.storage.type=HDFS, this user must have permission to create directories under the HDFS root path.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://mycluster:8020</value>
<description>
If resource.storage.type=S3, use a value like s3a://dolphinscheduler. If resource.storage.type=HDFS and NameNode HA is enabled, copy core-site.xml and hdfs-site.xml to the conf directory.
</description>
<on-ambari-upgrade add="true"/>
</property>
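<!--
Example (hypothetical nameservice): with NameNode HA, fs.defaultFS should point at the
nameservice ID rather than a single host, e.g. hdfs://mycluster, and the cluster's
core-site.xml and hdfs-site.xml must be copied into the conf directory so the HDFS client
can resolve the nameservice.
-->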
<property>
<name>fs.s3a.endpoint</name>
<value>http://host:9010</value>
<description>
Required for S3: the S3 endpoint URL.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.s3a.access.key</name>
<value>A3DXS30FO22544RE</value>
<description>
Required for S3: the S3 access key.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>fs.s3a.secret.key</name>
<value>OloCLq3n+8+sdPHUhJ21XrSxTC+JK</value>
<description>
Required for S3: the S3 secret key.
</description>
<on-ambari-upgrade add="true"/>
</property>
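<!--
Example of a complete S3 setup (hypothetical endpoint, placeholder credentials):
resource.storage.type = S3
resource.upload.path = /dolphinscheduler
fs.defaultFS = s3a://dolphinscheduler
fs.s3a.endpoint = http://minio.example.com:9000
fs.s3a.access.key = <your access key>
fs.s3a.secret.key = <your secret key>
-->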
<property>
<name>resource.manager.httpaddress.port</name>
<value>8088</value>
<description>
resourcemanager port, the default value is 8088 if not specified </description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>yarn.resourcemanager.ha.rm.ids</name>
<value>192.168.xx.xx,192.168.xx.xx</value>
<description>
If ResourceManager HA is enabled, set the HA IPs (comma-separated); if there is a single ResourceManager, leave this value empty.
</description>
<on-ambari-upgrade add="true"/>
</property>
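<!--
Example (hypothetical addresses): with ResourceManager HA enabled this could be
192.168.1.10,192.168.1.11; each address is expected to be probed on
resource.manager.httpaddress.port to locate the active ResourceManager.
-->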
<property>
<name>yarn.application.status.address</name>
<value>http://ds1:8088/ws/v1/cluster/apps/%s</value>
<description>
If ResourceManager HA is enabled or no ResourceManager is used, keep the default value; if there is a single ResourceManager, replace ds1 with the actual ResourceManager hostname.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>yarn.job.history.status.address</name>
<value>http://ds1:19888/ws/v1/history/mapreduce/jobs/%s</value>
<description>
Job history status URL, used when the application number threshold is reached (default 10000; it may be set to 1000).
</description>
<on-ambari-upgrade add="true"/>
</property>
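<!--
Note: the %s in the two status URLs above is a placeholder substituted with the YARN
application ID at runtime, e.g.
http://ds1:8088/ws/v1/cluster/apps/application_1234567890123_0001 (hypothetical ID).
-->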
<property>
<name>datasource.encryption.enable</name>
<value>false</value>
<description>
Whether to enable datasource encryption.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>datasource.encryption.salt</name>
<value>!@#$%^&amp;*</value>
<description>
Salt used for datasource encryption.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>support.hive.oneSession</name>
<value>false</value>
<description>
Whether Hive SQL statements are executed in the same session.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>sudo.enable</name>
<value>true</value>
<description>
Whether to use sudo. If true, the executing user is the tenant user and the deploy user needs sudo permissions; if false, the executing user is the deploy user and no sudo permissions are needed.
</description>
<on-ambari-upgrade add="true"/>
</property>
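<!--
Illustrative note: with sudo.enable=true, task commands are expected to run roughly as
"sudo -u <tenant> sh task.sh" (hypothetical command line), so the deploy user needs
passwordless sudo to the tenant users.
-->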
<property>
<name>development.state</name>
<value>false</value>
<description>
Whether development mode is enabled.
</description>
<on-ambari-upgrade add="true"/>
</property>
<property>
<name>dolphinscheduler.env.path</name>
<value>lib/plugin/datasource</value>
<description>
System environment file path.
</description>
<on-ambari-upgrade add="true"/>
</property>
</configuration>