| <?xml version="1.0"?> |
| <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> |
| |
| <!-- |
| Licensed to the Apache Software Foundation (ASF) under one or more |
| contributor license agreements. See the NOTICE file distributed with |
| this work for additional information regarding copyright ownership. |
| The ASF licenses this file to You under the Apache License, Version 2.0 |
| (the "License"); you may not use this file except in compliance with |
| the License. You may obtain a copy of the License at |
| |
| http://www.apache.org/licenses/LICENSE-2.0 |
| |
| Unless required by applicable law or agreed to in writing, software |
| distributed under the License is distributed on an "AS IS" BASIS, |
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| See the License for the specific language governing permissions and |
| limitations under the License. |
| --> |
| |
| <!-- Do not modify this file directly. Instead, copy entries that you --> |
| <!-- wish to modify from this file into yarn-site.xml and change them --> |
| <!-- there. If yarn-site.xml does not already exist, create it. --> |
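| |
| <!-- For example, a minimal yarn-site.xml overriding a single entry from |
|      this file might look like the following (the hostname is merely an |
|      illustrative placeholder): |
| |
|      <configuration> |
|        <property> |
|          <name>yarn.resourcemanager.hostname</name> |
|          <value>rm.example.com</value> |
|        </property> |
|      </configuration> |
| --> |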
| |
| <configuration> |
| |
| <!-- IPC Configs --> |
| <property> |
| <description>Factory to create client IPC classes.</description> |
| <name>yarn.ipc.client.factory.class</name> |
| </property> |
| |
| <property> |
| <description>Type of serialization to use.</description> |
| <name>yarn.ipc.serializer.type</name> |
| <value>protocolbuffers</value> |
| </property> |
| |
| <property> |
| <description>Factory to create server IPC classes.</description> |
| <name>yarn.ipc.server.factory.class</name> |
| </property> |
| |
| <property> |
| <description>Factory to create IPC exceptions.</description> |
| <name>yarn.ipc.exception.factory.class</name> |
| </property> |
| |
| <property> |
|     <description>Factory to create serializable records.</description> |
| <name>yarn.ipc.record.factory.class</name> |
| </property> |
| |
| <property> |
| <description>RPC class implementation</description> |
| <name>yarn.ipc.rpc.class</name> |
| <value>org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC</value> |
| </property> |
| |
| <!-- Resource Manager Configs --> |
| <property> |
| <description>The hostname of the RM.</description> |
| <name>yarn.resourcemanager.hostname</name> |
| <value>0.0.0.0</value> |
| </property> |
| |
| <property> |
| <description>The address of the applications manager interface in the RM.</description> |
| <name>yarn.resourcemanager.address</name> |
| <value>${yarn.resourcemanager.hostname}:8032</value> |
| </property> |
| |
| <property> |
|     <description> |
|       The actual address the server will bind to. If this optional address is |
|       set, the RPC and webapp servers will bind to this address and the port specified in |
|       yarn.resourcemanager.address and yarn.resourcemanager.webapp.address, respectively. This |
|       is most useful for making the RM listen to all interfaces by setting it to 0.0.0.0. |
|     </description> |
| <name>yarn.resourcemanager.bind-host</name> |
| <value></value> |
| </property> |
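| |
|   <!-- Illustrative sketch (the hostname is a placeholder): binding the RM |
|        server sockets to all interfaces while clients still use the |
|        advertised hostname: |
| |
|   <property> |
|     <name>yarn.resourcemanager.hostname</name> |
|     <value>rm.example.com</value> |
|   </property> |
|   <property> |
|     <name>yarn.resourcemanager.bind-host</name> |
|     <value>0.0.0.0</value> |
|   </property> |
|   --> |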
| |
| <property> |
| <description>The number of threads used to handle applications manager requests.</description> |
| <name>yarn.resourcemanager.client.thread-count</name> |
| <value>50</value> |
| </property> |
| |
| <property> |
| <description>The expiry interval for application master reporting.</description> |
| <name>yarn.am.liveness-monitor.expiry-interval-ms</name> |
| <value>600000</value> |
| </property> |
| |
| <property> |
| <description>The Kerberos principal for the resource manager.</description> |
| <name>yarn.resourcemanager.principal</name> |
| </property> |
| |
| <property> |
| <description>The address of the scheduler interface.</description> |
| <name>yarn.resourcemanager.scheduler.address</name> |
| <value>${yarn.resourcemanager.hostname}:8030</value> |
| </property> |
| |
| <property> |
| <description>Number of threads to handle scheduler interface.</description> |
| <name>yarn.resourcemanager.scheduler.client.thread-count</name> |
| <value>50</value> |
| </property> |
| |
| <property> |
|     <description> |
|     This configures the HTTP endpoint for YARN daemons. The following |
| values are supported: |
| - HTTP_ONLY : Service is provided only on http |
| - HTTPS_ONLY : Service is provided only on https |
| </description> |
| <name>yarn.http.policy</name> |
| <value>HTTP_ONLY</value> |
| </property> |
| |
| <property> |
| <description>The http address of the RM web application.</description> |
| <name>yarn.resourcemanager.webapp.address</name> |
| <value>${yarn.resourcemanager.hostname}:8088</value> |
| </property> |
| |
| <property> |
|     <description>The https address of the RM web application.</description> |
| <name>yarn.resourcemanager.webapp.https.address</name> |
| <value>${yarn.resourcemanager.hostname}:8090</value> |
| </property> |
| |
| <property> |
|     <description>The address of the resource tracker interface in the RM.</description> |
|     <name>yarn.resourcemanager.resource-tracker.address</name> |
| <value>${yarn.resourcemanager.hostname}:8031</value> |
| </property> |
| |
| <property> |
|     <description>Are ACLs enabled.</description> |
| <name>yarn.acl.enable</name> |
| <value>false</value> |
| </property> |
| |
| <property> |
| <description>ACL of who can be admin of the YARN cluster.</description> |
| <name>yarn.admin.acl</name> |
| <value>*</value> |
| </property> |
| |
| <property> |
| <description>The address of the RM admin interface.</description> |
| <name>yarn.resourcemanager.admin.address</name> |
| <value>${yarn.resourcemanager.hostname}:8033</value> |
| </property> |
| |
| <property> |
| <description>Number of threads used to handle RM admin interface.</description> |
| <name>yarn.resourcemanager.admin.client.thread-count</name> |
| <value>1</value> |
| </property> |
| |
| <property> |
| <description>How often should the RM check that the AM is still alive.</description> |
| <name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name> |
| <value>1000</value> |
| </property> |
| |
| <property> |
| <description>Maximum time to wait to establish connection to |
| ResourceManager.</description> |
| <name>yarn.resourcemanager.connect.max-wait.ms</name> |
| <value>900000</value> |
| </property> |
| |
| <property> |
| <description>How often to try connecting to the |
| ResourceManager.</description> |
| <name>yarn.resourcemanager.connect.retry-interval.ms</name> |
| <value>30000</value> |
| </property> |
| |
| <property> |
| <description>The maximum number of application attempts. It's a global |
| setting for all application masters. Each application master can specify |
| its individual maximum number of application attempts via the API, but the |
| individual number cannot be more than the global upper bound. If it is, |
|     the resourcemanager will override it. The default number is set to 2, to |
|     allow at least one retry for the AM.</description> |
| <name>yarn.resourcemanager.am.max-attempts</name> |
| <value>2</value> |
| </property> |
| |
| <property> |
| <description>How often to check that containers are still alive. </description> |
| <name>yarn.resourcemanager.container.liveness-monitor.interval-ms</name> |
| <value>600000</value> |
| </property> |
| |
| <property> |
| <description>The keytab for the resource manager.</description> |
| <name>yarn.resourcemanager.keytab</name> |
| <value>/etc/krb5.keytab</value> |
| </property> |
| |
| <property> |
| <description>Flag to enable override of the default kerberos authentication |
| filter with the RM authentication filter to allow authentication using |
|     delegation tokens (falling back to Kerberos if the tokens are missing). Only |
| applicable when the http authentication type is kerberos.</description> |
| <name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name> |
| <value>true</value> |
| </property> |
| |
| <property> |
| <description>How long to wait until a node manager is considered dead.</description> |
| <name>yarn.nm.liveness-monitor.expiry-interval-ms</name> |
| <value>600000</value> |
| </property> |
| |
| <property> |
| <description>How often to check that node managers are still alive.</description> |
| <name>yarn.resourcemanager.nm.liveness-monitor.interval-ms</name> |
| <value>1000</value> |
| </property> |
| |
| <property> |
| <description>Path to file with nodes to include.</description> |
| <name>yarn.resourcemanager.nodes.include-path</name> |
| <value></value> |
| </property> |
| |
| <property> |
| <description>Path to file with nodes to exclude.</description> |
| <name>yarn.resourcemanager.nodes.exclude-path</name> |
| <value></value> |
| </property> |
| |
| <property> |
| <description>Number of threads to handle resource tracker calls.</description> |
| <name>yarn.resourcemanager.resource-tracker.client.thread-count</name> |
| <value>50</value> |
| </property> |
| |
| <property> |
| <description>The class to use as the resource scheduler.</description> |
| <name>yarn.resourcemanager.scheduler.class</name> |
| <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value> |
| </property> |
| |
| <property> |
| <description>The minimum allocation for every container request at the RM, |
|     in MBs. Memory requests lower than this won't take effect, |
|     and this minimum value will be allocated instead.</description> |
| <name>yarn.scheduler.minimum-allocation-mb</name> |
| <value>1024</value> |
| </property> |
| |
| <property> |
| <description>The maximum allocation for every container request at the RM, |
| in MBs. Memory requests higher than this won't take effect, |
| and will get capped to this value.</description> |
| <name>yarn.scheduler.maximum-allocation-mb</name> |
| <value>8192</value> |
| </property> |
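| |
|   <!-- Worked example with the defaults above: a request of 512 MB is raised |
|        to the 1024 MB minimum, and a request of 10000 MB is capped to the |
|        8192 MB maximum. Schedulers that allocate in multiples of the minimum |
|        (such as the CapacityScheduler) would typically round a 1500 MB |
|        request up to 2048 MB. --> |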
| |
| <property> |
| <description>The minimum allocation for every container request at the RM, |
|     in terms of virtual CPU cores. Requests lower than this won't take effect, |
|     and this minimum value will be allocated instead.</description> |
| <name>yarn.scheduler.minimum-allocation-vcores</name> |
| <value>1</value> |
| </property> |
| |
| <property> |
| <description>The maximum allocation for every container request at the RM, |
| in terms of virtual CPU cores. Requests higher than this won't take effect, |
| and will get capped to this value.</description> |
| <name>yarn.scheduler.maximum-allocation-vcores</name> |
| <value>32</value> |
| </property> |
| |
| <property> |
| <description>Enable RM to recover state after starting. If true, then |
| yarn.resourcemanager.store.class must be specified. </description> |
| <name>yarn.resourcemanager.recovery.enabled</name> |
| <value>false</value> |
| </property> |
| |
| <property> |
|     <description>Enable RM work preserving recovery. This configuration is private |
|     to YARN for experimenting with the feature. |
| </description> |
| <name>yarn.resourcemanager.work-preserving-recovery.enabled</name> |
| <value>false</value> |
| </property> |
| |
| <property> |
|     <description>The class to use as the persistent store. |
| |
|       If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore |
|       is used, the store is implicitly fenced, meaning a single ResourceManager |
|       is able to use the store at any point in time. More details on this |
|       implicit fencing, along with setting up appropriate ACLs, are discussed |
|       under yarn.resourcemanager.zk-state-store.root-node.acl. |
|     </description> |
| <name>yarn.resourcemanager.store.class</name> |
| <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore</value> |
| </property> |
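| |
|   <!-- Illustrative sketch, in yarn-site.xml, of enabling RM recovery with |
|        the ZooKeeper state store (the ZooKeeper quorum is a placeholder): |
| |
|   <property> |
|     <name>yarn.resourcemanager.recovery.enabled</name> |
|     <value>true</value> |
|   </property> |
|   <property> |
|     <name>yarn.resourcemanager.store.class</name> |
|     <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value> |
|   </property> |
|   <property> |
|     <name>yarn.resourcemanager.zk-address</name> |
|     <value>zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181</value> |
|   </property> |
|   --> |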
| |
| <property> |
|     <description>The maximum number of completed applications the RM state |
|     store keeps, less than or equal to ${yarn.resourcemanager.max-completed-applications}. |
|     By default, it equals ${yarn.resourcemanager.max-completed-applications}. |
|     This ensures that the applications kept in the state store are consistent with |
|     the applications remembered in RM memory. |
|     Any values larger than ${yarn.resourcemanager.max-completed-applications} will |
|     be reset to ${yarn.resourcemanager.max-completed-applications}. |
|     Note that this value impacts the RM recovery performance. Typically, |
|     a smaller value indicates better performance on RM recovery. |
| </description> |
| <name>yarn.resourcemanager.state-store.max-completed-applications</name> |
| <value>${yarn.resourcemanager.max-completed-applications}</value> |
| </property> |
| |
| <property> |
|     <description>Host:Port of the ZooKeeper server to be used by the RM. This |
|       must be supplied when using the ZooKeeper based implementation of the |
|       RM state store and/or embedded automatic failover in an HA setting. |
|     </description> |
| <name>yarn.resourcemanager.zk-address</name> |
| <!--value>127.0.0.1:2181</value--> |
| </property> |
| |
| <property> |
| <description>Number of times RM tries to connect to ZooKeeper.</description> |
| <name>yarn.resourcemanager.zk-num-retries</name> |
| <value>1000</value> |
| </property> |
| |
| <property> |
| <description>Retry interval in milliseconds when connecting to ZooKeeper. |
| When HA is enabled, the value here is NOT used. It is generated |
| automatically from yarn.resourcemanager.zk-timeout-ms and |
| yarn.resourcemanager.zk-num-retries. |
| </description> |
| <name>yarn.resourcemanager.zk-retry-interval-ms</name> |
| <value>1000</value> |
| </property> |
| |
| <property> |
| <description>Full path of the ZooKeeper znode where RM state will be |
| stored. This must be supplied when using |
| org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore |
| as the value for yarn.resourcemanager.store.class</description> |
| <name>yarn.resourcemanager.zk-state-store.parent-path</name> |
| <value>/rmstore</value> |
| </property> |
| |
| <property> |
|     <description>ZooKeeper session timeout in milliseconds. Session expiration |
|     is managed by the ZooKeeper cluster itself, not by the client. This value is |
|     used by the cluster to determine when the client's session expires. |
|     Expiration happens when the cluster does not hear from the client within |
|     the specified session timeout period (i.e. no heartbeat).</description> |
| <name>yarn.resourcemanager.zk-timeout-ms</name> |
| <value>10000</value> |
| </property> |
| |
| <property> |
|     <description>ACLs to be used for ZooKeeper znodes.</description> |
| <name>yarn.resourcemanager.zk-acl</name> |
| <value>world:anyone:rwcda</value> |
| </property> |
| |
| <property> |
|     <description> |
|       ACLs to be used for the root znode when using ZKRMStateStore in an HA |
|       scenario for fencing. |
| |
|       ZKRMStateStore supports implicit fencing to allow a single |
|       ResourceManager write-access to the store. For fencing, the |
|       ResourceManagers in the cluster share read-write-admin privileges on the |
|       root node, but the Active ResourceManager claims exclusive create-delete |
|       permissions. |
| |
|       By default, when this property is not set, we use the ACLs from |
|       yarn.resourcemanager.zk-acl for shared admin access and |
|       rm-address:random-number for username-based exclusive create-delete |
|       access. |
| |
|       This property allows users to set ACLs of their choice instead of using |
|       the default mechanism. For fencing to work, the ACLs should be |
|       carefully set differently on each ResourceManager such that all the |
|       ResourceManagers have shared admin access and the Active ResourceManager |
|       takes over (exclusively) the create-delete access. |
|     </description> |
| <name>yarn.resourcemanager.zk-state-store.root-node.acl</name> |
| </property> |
| |
| <property> |
|     <description> |
|       Specify the auths to be used for the ACLs specified in both the |
| yarn.resourcemanager.zk-acl and |
| yarn.resourcemanager.zk-state-store.root-node.acl properties. This |
| takes a comma-separated list of authentication mechanisms, each of the |
| form 'scheme:auth' (the same syntax used for the 'addAuth' command in |
| the ZK CLI). |
| </description> |
| <name>yarn.resourcemanager.zk-auth</name> |
| </property> |
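| |
|   <!-- Illustrative example (credentials are placeholders): with the digest |
|        scheme the auth part is username:password, so a value such as |
|        digest:rmuser:rmpass would authenticate the RM against a matching |
|        digest ACL configured in yarn.resourcemanager.zk-acl. --> |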
| |
| <property> |
| <description>URI pointing to the location of the FileSystem path where |
| RM state will be stored. This must be supplied when using |
| org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore |
| as the value for yarn.resourcemanager.store.class</description> |
| <name>yarn.resourcemanager.fs.state-store.uri</name> |
| <value>${hadoop.tmp.dir}/yarn/system/rmstore</value> |
| <!--value>hdfs://localhost:9000/rmstore</value--> |
| </property> |
| |
| <property> |
|     <description>HDFS client retry policy specification. HDFS client retry |
|     is always enabled. Specified in pairs of sleep-time and number-of-retries |
|     as (t0, n0), (t1, n1), ...: the first n0 retries sleep t0 milliseconds on |
|     average, the following n1 retries sleep t1 milliseconds on average, and so on. |
| </description> |
| <name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name> |
| <value>2000, 500</value> |
| </property> |
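| |
|   <!-- Worked example: the default "2000, 500" is a single (t0, n0) pair, |
|        i.e. up to 500 retries sleeping about 2000 milliseconds each. A spec |
|        of "1000, 10, 60000, 5" would mean 10 retries at roughly one second |
|        followed by 5 retries at roughly one minute. --> |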
| |
| <property> |
|     <description>Enable RM high-availability. When enabled, |
|       (1) The RM starts in the Standby mode by default, and transitions to |
|       the Active mode when prompted to. |
|       (2) The nodes in the RM ensemble are listed in |
|       yarn.resourcemanager.ha.rm-ids. |
|       (3) The id of each RM either comes from yarn.resourcemanager.ha.id, |
|       if yarn.resourcemanager.ha.id is explicitly specified, or can be |
|       figured out by matching yarn.resourcemanager.address.{id} with the local address. |
|       (4) The actual physical addresses come from the configs of the pattern |
|       - {rpc-config}.{id}</description> |
| <name>yarn.resourcemanager.ha.enabled</name> |
| <value>false</value> |
| </property> |
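| |
|   <!-- Illustrative minimal HA setup in yarn-site.xml (ids and hostnames are |
|        placeholders; per-RM addresses follow the {rpc-config}.{id} pattern |
|        described above): |
| |
|   <property> |
|     <name>yarn.resourcemanager.ha.enabled</name> |
|     <value>true</value> |
|   </property> |
|   <property> |
|     <name>yarn.resourcemanager.cluster-id</name> |
|     <value>yarn-cluster</value> |
|   </property> |
|   <property> |
|     <name>yarn.resourcemanager.ha.rm-ids</name> |
|     <value>rm1,rm2</value> |
|   </property> |
|   <property> |
|     <name>yarn.resourcemanager.hostname.rm1</name> |
|     <value>rm1.example.com</value> |
|   </property> |
|   <property> |
|     <name>yarn.resourcemanager.hostname.rm2</name> |
|     <value>rm2.example.com</value> |
|   </property> |
|   --> |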
| |
| <property> |
|     <description>Enable automatic failover. |
|       By default, it is enabled only when HA is enabled.</description> |
| <name>yarn.resourcemanager.ha.automatic-failover.enabled</name> |
| <value>true</value> |
| </property> |
| |
| <property> |
| <description>Enable embedded automatic failover. |
| By default, it is enabled only when HA is enabled. |
| The embedded elector relies on the RM state store to handle fencing, |
| and is primarily intended to be used in conjunction with ZKRMStateStore. |
| </description> |
| <name>yarn.resourcemanager.ha.automatic-failover.embedded</name> |
| <value>true</value> |
| </property> |
| |
| <property> |
| <description>The base znode path to use for storing leader information, |
| when using ZooKeeper based leader election.</description> |
| <name>yarn.resourcemanager.ha.automatic-failover.zk-base-path</name> |
| <value>/yarn-leader-election</value> |
| </property> |
| |
| <property> |
| <description>Name of the cluster. In a HA setting, |
| this is used to ensure the RM participates in leader |
| election for this cluster and ensures it does not affect |
| other clusters</description> |
| <name>yarn.resourcemanager.cluster-id</name> |
| <!--value>yarn-cluster</value--> |
| </property> |
| |
| <property> |
|     <description>The list of RM nodes in the cluster when HA is |
|       enabled. See the description of yarn.resourcemanager.ha.enabled |
|       for full details on how this is used.</description> |
| <name>yarn.resourcemanager.ha.rm-ids</name> |
| <!--value>rm1,rm2</value--> |
| </property> |
| |
| <property> |
|     <description>The id (string) of the current RM. When HA is enabled, this |
|     is an optional config. The id of the current RM can be set by explicitly |
|     specifying yarn.resourcemanager.ha.id, or figured out by matching |
|     yarn.resourcemanager.address.{id} with the local address. |
|     See the description of yarn.resourcemanager.ha.enabled |
|     for full details on how this is used.</description> |
| <name>yarn.resourcemanager.ha.id</name> |
| <!--value>rm1</value--> |
| </property> |
| |
| <property> |
| <description>When HA is enabled, the class to be used by Clients, AMs and |
| NMs to failover to the Active RM. It should extend |
| org.apache.hadoop.yarn.client.RMFailoverProxyProvider</description> |
| <name>yarn.client.failover-proxy-provider</name> |
| <value>org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider</value> |
| </property> |
| |
| <property> |
| <description>When HA is enabled, the max number of times |
| FailoverProxyProvider should attempt failover. When set, |
| this overrides the yarn.resourcemanager.connect.max-wait.ms. When |
| not set, this is inferred from |
| yarn.resourcemanager.connect.max-wait.ms.</description> |
| <name>yarn.client.failover-max-attempts</name> |
| <!--value>15</value--> |
| </property> |
| |
| <property> |
| <description>When HA is enabled, the sleep base (in milliseconds) to be |
| used for calculating the exponential delay between failovers. When set, |
| this overrides the yarn.resourcemanager.connect.* settings. When |
| not set, yarn.resourcemanager.connect.retry-interval.ms is used instead. |
| </description> |
| <name>yarn.client.failover-sleep-base-ms</name> |
| <!--value>500</value--> |
| </property> |
| |
| <property> |
| <description>When HA is enabled, the maximum sleep time (in milliseconds) |
| between failovers. When set, this overrides the |
| yarn.resourcemanager.connect.* settings. When not set, |
| yarn.resourcemanager.connect.retry-interval.ms is used instead.</description> |
| <name>yarn.client.failover-sleep-max-ms</name> |
| <!--value>15000</value--> |
| </property> |
| |
| <property> |
|     <description>When HA is enabled, the number of retries per |
|       attempt to connect to a ResourceManager. In other words, |
|       it is the ipc.client.connect.max.retries to be used during |
|       failover attempts.</description> |
| <name>yarn.client.failover-retries</name> |
| <value>0</value> |
| </property> |
| |
| <property> |
|     <description>When HA is enabled, the number of retries per |
|       attempt to connect to a ResourceManager on socket timeouts. In other |
|       words, it is the ipc.client.connect.max.retries.on.timeouts to be used |
|       during failover attempts.</description> |
| <name>yarn.client.failover-retries-on-socket-timeouts</name> |
| <value>0</value> |
| </property> |
| |
| <property> |
| <description>The maximum number of completed applications RM keeps. </description> |
| <name>yarn.resourcemanager.max-completed-applications</name> |
| <value>10000</value> |
| </property> |
| |
| <property> |
|     <description>Interval at which the delayed token removal thread runs.</description> |
| <name>yarn.resourcemanager.delayed.delegation-token.removal-interval-ms</name> |
| <value>30000</value> |
| </property> |
| |
| <property> |
|     <description>Interval, in seconds, for rolling over the master key used |
|     to generate application tokens. |
|     </description> |
| <name>yarn.resourcemanager.application-tokens.master-key-rolling-interval-secs</name> |
| <value>86400</value> |
| </property> |
| |
| <property> |
|     <description>Interval, in seconds, for rolling over the master key used |
|     to generate container tokens. It is expected to be much greater than |
| yarn.nm.liveness-monitor.expiry-interval-ms and |
| yarn.rm.container-allocation.expiry-interval-ms. Otherwise the |
| behavior is undefined. |
| </description> |
| <name>yarn.resourcemanager.container-tokens.master-key-rolling-interval-secs</name> |
| <value>86400</value> |
| </property> |
| |
| <property> |
| <description>The heart-beat interval in milliseconds for every NodeManager in the cluster.</description> |
| <name>yarn.resourcemanager.nodemanagers.heartbeat-interval-ms</name> |
| <value>1000</value> |
| </property> |
| |
| <property> |
| <description>The minimum allowed version of a connecting nodemanager. The valid values are |
| NONE (no version checking), EqualToRM (the nodemanager's version is equal to |
| or greater than the RM version), or a Version String.</description> |
| <name>yarn.resourcemanager.nodemanager.minimum.version</name> |
| <value>NONE</value> |
| </property> |
| |
| <property> |
| <description>Enable a set of periodic monitors (specified in |
| yarn.resourcemanager.scheduler.monitor.policies) that affect the |
| scheduler.</description> |
| <name>yarn.resourcemanager.scheduler.monitor.enable</name> |
| <value>false</value> |
| </property> |
| |
| <property> |
| <description>The list of SchedulingEditPolicy classes that interact with |
| the scheduler. A particular module may be incompatible with the |
| scheduler, other policies, or a configuration of either.</description> |
| <name>yarn.resourcemanager.scheduler.monitor.policies</name> |
| <value>org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy</value> |
| </property> |
| |
| <property> |
| <description>Number of worker threads that write the history data.</description> |
| <name>yarn.resourcemanager.history-writer.multi-threaded-dispatcher.pool-size</name> |
| <value>10</value> |
| </property> |
| |
| <property> |
| <description>The class to use as the configuration provider. |
| If org.apache.hadoop.yarn.LocalConfigurationProvider is used, |
| the local configuration will be loaded. |
|     If org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider is used, |
|     the configuration to be loaded must first be uploaded to the remote file system. |
|     </description> |
| <name>yarn.resourcemanager.configuration.provider-class</name> |
| <value>org.apache.hadoop.yarn.LocalConfigurationProvider</value> |
| <!-- <value>org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider</value> --> |
| </property> |
| |
| <!-- Node Manager Configs --> |
| <property> |
| <description>The hostname of the NM.</description> |
| <name>yarn.nodemanager.hostname</name> |
| <value>0.0.0.0</value> |
| </property> |
| |
| <property> |
| <description>The address of the container manager in the NM.</description> |
| <name>yarn.nodemanager.address</name> |
| <value>${yarn.nodemanager.hostname}:0</value> |
| </property> |
| |
| <property> |
|     <description> |
|       The actual address the server will bind to. If this optional address is |
|       set, the RPC and webapp servers will bind to this address and the port specified in |
|       yarn.nodemanager.address and yarn.nodemanager.webapp.address, respectively. This is |
|       most useful for making the NM listen to all interfaces by setting it to 0.0.0.0. |
|     </description> |
| <name>yarn.nodemanager.bind-host</name> |
| <value></value> |
| </property> |
| |
| <property> |
| <description>Environment variables that should be forwarded from the NodeManager's environment to the container's.</description> |
| <name>yarn.nodemanager.admin-env</name> |
| <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value> |
| </property> |
| |
| <property> |
| <description>Environment variables that containers may override rather than use NodeManager's default.</description> |
| <name>yarn.nodemanager.env-whitelist</name> |
| <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,HADOOP_YARN_HOME</value> |
| </property> |
| |
| <property> |
|     <description>Who will execute (launch) the containers.</description> |
| <name>yarn.nodemanager.container-executor.class</name> |
| <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value> |
| <!--<value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>--> |
| </property> |
| |
| <property> |
| <description>Number of threads container manager uses.</description> |
| <name>yarn.nodemanager.container-manager.thread-count</name> |
| <value>20</value> |
| </property> |
| |
| <property> |
| <description>Number of threads used in cleanup.</description> |
| <name>yarn.nodemanager.delete.thread-count</name> |
| <value>4</value> |
| </property> |
| |
| <property> |
|     <description> |
|       Number of seconds after an application finishes before the nodemanager's |
|       DeletionService will delete the application's localized file directory |
|       and log directory. |
| |
|       To diagnose YARN application problems, set this property's value large |
|       enough (for example, to 600 = 10 minutes) to permit examination of these |
|       directories. After changing the property's value, you must restart the |
|       nodemanager in order for it to have an effect. |
| |
|       The roots of YARN applications' work directories are configurable with |
|       the yarn.nodemanager.local-dirs property (see below), and the roots |
|       of the YARN applications' log directories are configurable with the |
|       yarn.nodemanager.log-dirs property (see also below). |
|     </description> |
| <name>yarn.nodemanager.delete.debug-delay-sec</name> |
| <value>0</value> |
| </property> |
| |
| <property> |
| <description>Keytab for NM.</description> |
| <name>yarn.nodemanager.keytab</name> |
| <value>/etc/krb5.keytab</value> |
| </property> |
| |
| <property> |
| <description>List of directories to store localized files in. An |
| application's localized file directory will be found in: |
| ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}. |
| Individual containers' work directories, called container_${contid}, will |
| be subdirectories of this. |
| </description> |
| <name>yarn.nodemanager.local-dirs</name> |
| <value>${hadoop.tmp.dir}/nm-local-dir</value> |
| </property> |
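| |
|   <!-- Illustrative example (paths are placeholders): spreading local dirs |
|        across several physical disks via a comma-separated list: |
| |
|   <property> |
|     <name>yarn.nodemanager.local-dirs</name> |
|     <value>/data1/yarn/nm-local-dir,/data2/yarn/nm-local-dir</value> |
|   </property> |
|   --> |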
| |
| <property> |
|     <description>Limits the maximum number of files which will be localized |
|     in a single local directory. If the limit is reached, sub-directories |
|     will be created and new files will be localized in them. If it is set to |
|     a value less than or equal to 36 [which is the number of sub-directories |
|     (0-9 and then a-z)], the NodeManager will fail to start. For example, for |
|     the public cache, if this is configured with a value of 40 (4 files + |
|     36 sub-directories) and the local-dir is "/tmp/local-dir1", then it will |
|     allow 4 files to be created directly inside "/tmp/local-dir1/filecache". |
|     For files that are localized further, it will create a sub-directory "0" |
|     inside "/tmp/local-dir1/filecache" and will localize files inside it |
|     until it becomes full. If a file is removed from a sub-directory that |
|     is marked full, then that sub-directory will be used again to |
|     localize files. |
| </description> |
| <name>yarn.nodemanager.local-cache.max-files-per-directory</name> |
| <value>8192</value> |
| </property> |
| |
| <property> |
| <description>Address where the localizer IPC is.</description> |
| <name>yarn.nodemanager.localizer.address</name> |
| <value>${yarn.nodemanager.hostname}:8040</value> |
| </property> |
| |
| <property> |
| <description>Interval in between cache cleanups.</description> |
| <name>yarn.nodemanager.localizer.cache.cleanup.interval-ms</name> |
| <value>600000</value> |
| </property> |
| |
| <property> |
|     <description>Target size of localizer cache in MB, per nodemanager. It is |
|     a target retention size that only includes resources with PUBLIC and |
|     PRIVATE visibility and excludes resources with APPLICATION visibility. |
|     </description> |
| <name>yarn.nodemanager.localizer.cache.target-size-mb</name> |
| <value>10240</value> |
| </property> |
| |
| <property> |
| <description>Number of threads to handle localization requests.</description> |
| <name>yarn.nodemanager.localizer.client.thread-count</name> |
| <value>5</value> |
| </property> |
| |
| <property> |
| <description>Number of threads to use for localization fetching.</description> |
| <name>yarn.nodemanager.localizer.fetch.thread-count</name> |
| <value>4</value> |
| </property> |
| |
| <property> |
|     <description> |
|       Where to store container logs. An application's localized log directory |
|       will be found in ${yarn.nodemanager.log-dirs}/application_${appid}. |
|       Individual containers' log directories will be below this, in directories |
|       named container_${contid}. Each container directory will contain the files |
|       stderr, stdout, and syslog generated by that container. |
|     </description> |
| <name>yarn.nodemanager.log-dirs</name> |
| <value>${yarn.log.dir}/userlogs</value> |
| </property> |
| |
| <property> |
|     <description>Whether to enable log aggregation. Log aggregation collects |
|       each container's logs and moves these logs onto a file system, e.g. |
|       HDFS, after the application completes. Users can configure the |
|       "yarn.nodemanager.remote-app-log-dir" and |
|       "yarn.nodemanager.remote-app-log-dir-suffix" properties to determine |
|       where these logs are moved to. Users can access the logs via the |
|       Application Timeline Server. |
|     </description> |
| <name>yarn.log-aggregation-enable</name> |
| <value>false</value> |
| </property> |
| |
| <property> |
|     <description>How long to keep aggregated logs before deleting them. -1 disables deletion. |
|     Be careful: setting this too small will spam the name node.</description> |
| <name>yarn.log-aggregation.retain-seconds</name> |
| <value>-1</value> |
| </property> |
| |
| <property> |
|     <description>How long to wait between aggregated log retention checks. |
|     If set to 0 or a negative value, the value is computed as one-tenth |
|     of the aggregated log retention time. Be careful: setting this too small |
|     will spam the name node.</description> |
| <name>yarn.log-aggregation.retain-check-interval-seconds</name> |
| <value>-1</value> |
| </property> |
| |
| <property> |
|     <description>Time in seconds to retain user logs. Only applicable if |
|     log aggregation is disabled. |
|     </description> |
| <name>yarn.nodemanager.log.retain-seconds</name> |
| <value>10800</value> |
| </property> |
| |
| <property> |
| <description>Where to aggregate logs to.</description> |
| <name>yarn.nodemanager.remote-app-log-dir</name> |
| <value>/tmp/logs</value> |
| </property> |
| |
|   <property> |
|     <description>The remote log dir will be created at |
|       ${yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}. |
|     </description> |
| <name>yarn.nodemanager.remote-app-log-dir-suffix</name> |
| <value>logs</value> |
| </property> |
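| |
|   <!-- Illustrative sketch of enabling log aggregation in yarn-site.xml (the |
|        retention period is a placeholder). With the defaults above, logs for |
|        user "alice" would be aggregated under /tmp/logs/alice/logs: |
| |
|   <property> |
|     <name>yarn.log-aggregation-enable</name> |
|     <value>true</value> |
|   </property> |
|   <property> |
|     <name>yarn.log-aggregation.retain-seconds</name> |
|     <value>604800</value> |
|   </property> |
|   --> |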
| |
| <property> |
| <description>Amount of physical memory, in MB, that can be allocated |
| for containers.</description> |
| <name>yarn.nodemanager.resource.memory-mb</name> |
| <value>8192</value> |
| </property> |
| |
| <property> |
| <description>Whether physical memory limits will be enforced for |
| containers.</description> |
| <name>yarn.nodemanager.pmem-check-enabled</name> |
| <value>true</value> |
| </property> |
| |
| <property> |
| <description>Whether virtual memory limits will be enforced for |
| containers.</description> |
| <name>yarn.nodemanager.vmem-check-enabled</name> |
| <value>true</value> |
| </property> |
| |
| <property> |
|     <description>Ratio of virtual memory to physical memory when |
| setting memory limits for containers. Container allocations are |
| expressed in terms of physical memory, and virtual memory usage |
| is allowed to exceed this allocation by this ratio. |
| </description> |
| <name>yarn.nodemanager.vmem-pmem-ratio</name> |
| <value>2.1</value> |
| </property> |
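| |
|   <!-- Worked example: with the default ratio of 2.1, a container allocated |
|        1024 MB of physical memory may use up to 1024 * 2.1 = 2150.4 MB of |
|        virtual memory before being killed, assuming |
|        yarn.nodemanager.vmem-check-enabled is true. --> |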
| |
| <property> |
| <description>Number of CPU cores that can be allocated |
| for containers.</description> |
| <name>yarn.nodemanager.resource.cpu-vcores</name> |
| <value>8</value> |
| </property> |
| |
| <property> |
| <description>NM Webapp address.</description> |
| <name>yarn.nodemanager.webapp.address</name> |
| <value>${yarn.nodemanager.hostname}:8042</value> |
| </property> |
| |
| <property> |
| <description>How often to monitor containers.</description> |
| <name>yarn.nodemanager.container-monitor.interval-ms</name> |
| <value>3000</value> |
| </property> |
| |
| <property> |
|     <description>Class that calculates containers' current resource utilization.</description> |
| <name>yarn.nodemanager.container-monitor.resource-calculator.class</name> |
| </property> |
| |
| <property> |
| <description>Frequency of running node health script.</description> |
| <name>yarn.nodemanager.health-checker.interval-ms</name> |
| <value>600000</value> |
| </property> |
| |
| <property> |
|     <description>Script timeout period.</description> |
| <name>yarn.nodemanager.health-checker.script.timeout-ms</name> |
| <value>1200000</value> |
| </property> |
| |
| <property> |
| <description>The health check script to run.</description> |
| <name>yarn.nodemanager.health-checker.script.path</name> |
| <value></value> |
| </property> |
| |
| <property> |
| <description>The arguments to pass to the health check script.</description> |
| <name>yarn.nodemanager.health-checker.script.opts</name> |
| <value></value> |
| </property> |
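| |
|   <!-- Illustrative sketch (the script path and arguments are placeholders): |
| |
|   <property> |
|     <name>yarn.nodemanager.health-checker.script.path</name> |
|     <value>/usr/local/bin/nm-health-check.sh</value> |
|   </property> |
|   <property> |
|     <name>yarn.nodemanager.health-checker.script.opts</name> |
|     <value>-v</value> |
|   </property> |
|   --> |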
| |
| <property> |
| <description>Frequency of running disk health checker code.</description> |
| <name>yarn.nodemanager.disk-health-checker.interval-ms</name> |
| <value>120000</value> |
| </property> |
| |
| <property> |
|     <description>The minimum fraction of the number of disks that must be |
|     healthy for the nodemanager to launch new containers. This corresponds |
|     to both yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e. |
|     if there are fewer healthy local-dirs (or log-dirs) available, then |
|     new containers will not be launched on this node.</description> |
| <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name> |
| <value>0.25</value> |
| </property> |
| |
| <property> |
|     <description>The maximum percentage of disk space utilization allowed after |
|     which a disk is marked as bad. Values can range from 0.0 to 100.0. |
|     If the value is greater than or equal to 100, the nodemanager will check |
|     for a full disk. This applies to yarn.nodemanager.local-dirs and |
|     yarn.nodemanager.log-dirs.</description> |
| <name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name> |
| <value>100.0</value> |
| </property> |
| |
| <property> |
|     <description>The minimum space that must be available on a disk for |
|     it to be used. This applies to yarn.nodemanager.local-dirs and |
|     yarn.nodemanager.log-dirs.</description> |
| <name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name> |
| <value>0</value> |
| </property> |
| |
| <property> |
| <description>The path to the Linux container executor.</description> |
| <name>yarn.nodemanager.linux-container-executor.path</name> |
| </property> |
| |
| <property> |
| <description>The class which should help the LCE handle resources.</description> |
| <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name> |
| <value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value> |
| <!-- <value>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</value> --> |
| </property> |
| |
| <property> |
|     <description>The cgroups hierarchy under which to place YARN processes (cannot contain commas). |
| If yarn.nodemanager.linux-container-executor.cgroups.mount is false (that is, if cgroups have |
| been pre-configured), then this cgroups hierarchy must already exist and be writable by the |
| NodeManager user, otherwise the NodeManager may fail. |
| Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description> |
| <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name> |
| <value>/hadoop-yarn</value> |
| </property> |
| |
| <property> |
| <description>Whether the LCE should attempt to mount cgroups if not found. |
| Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler.</description> |
| <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name> |
| <value>false</value> |
| </property> |
| |
| <property> |
| <description>Where the LCE should attempt to mount cgroups if not found. Common locations |
| include /sys/fs/cgroup and /cgroup; the default location can vary depending on the Linux |
| distribution in use. This path must exist before the NodeManager is launched. |
| Only used when the LCE resources handler is set to the CgroupsLCEResourcesHandler, and |
| yarn.nodemanager.linux-container-executor.cgroups.mount is true.</description> |
| <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name> |
| </property> |
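| |
|   <!-- Illustrative sketch of switching to the LinuxContainerExecutor with |
|        the cgroups resources handler, assuming cgroups are pre-mounted on |
|        the node (so cgroups.mount stays false): |
| |
|   <property> |
|     <name>yarn.nodemanager.container-executor.class</name> |
|     <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value> |
|   </property> |
|   <property> |
|     <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name> |
|     <value>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</value> |
|   </property> |
|   --> |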
| |
| <property> |
|     <description>This determines which of the two modes the LCE should use on |
| a non-secure cluster. If this value is set to true, then all containers |
| will be launched as the user specified in |
| yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user. If |
| this value is set to false, then containers will run as the user who |
| submitted the application.</description> |
| <name>yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users</name> |
| <value>true</value> |
| </property> |
| |
| <property> |
| <description>The UNIX user that containers will run as when |
| Linux-container-executor is used in nonsecure mode (a use case for this |
| is using cgroups) if the |
| yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users is |
| set to true.</description> |
| <name>yarn.nodemanager.linux-container-executor.nonsecure-mode.local-user</name> |
| <value>nobody</value> |
| </property> |
| |
| <property> |
| <description>The allowed pattern for UNIX user names enforced by |
| Linux-container-executor when used in nonsecure mode (use case for this |
| is using cgroups). The default value is taken from /usr/sbin/adduser</description> |
| <name>yarn.nodemanager.linux-container-executor.nonsecure-mode.user-pattern</name> |
| <value>^[_.A-Za-z0-9][-@_.A-Za-z0-9]{0,255}?[$]?$</value> |
| </property> |
| |
| <property> |
| <description>T-file compression types used to compress aggregated logs.</description> |
| <name>yarn.nodemanager.log-aggregation.compression-type</name> |
| <value>none</value> |
| </property> |
| |
| <property> |
| <description>The kerberos principal for the node manager.</description> |
| <name>yarn.nodemanager.principal</name> |
| <value></value> |
| </property> |
| |
| <property> |
|     <description>A valid service name should only contain a-zA-Z0-9_ and cannot start with a number.</description> |
| <name>yarn.nodemanager.aux-services</name> |
| <value></value> |
| <!--<value>mapreduce_shuffle</value>--> |
| </property> |
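| |
|   <!-- For example, MapReduce jobs need the shuffle auxiliary service, which |
|        is wired up in yarn-site.xml with the pair of settings below (the |
|        handler class also appears in the Map Reduce section of this file): |
| |
|   <property> |
|     <name>yarn.nodemanager.aux-services</name> |
|     <value>mapreduce_shuffle</value> |
|   </property> |
|   <property> |
|     <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name> |
|     <value>org.apache.hadoop.mapred.ShuffleHandler</value> |
|   </property> |
|   --> |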
| |
| <property> |
|     <description>Number of milliseconds to wait between sending a SIGTERM and a SIGKILL to a container.</description> |
| <name>yarn.nodemanager.sleep-delay-before-sigkill.ms</name> |
| <value>250</value> |
| </property> |
| |
| <property> |
|     <description>Max time to wait for a process to come up when trying to clean up a container.</description> |
| <name>yarn.nodemanager.process-kill-wait.ms</name> |
| <value>2000</value> |
| </property> |
| |
| <property> |
|     <description>Max time, in seconds, to wait to establish a connection to the RM when the NM starts. |
|     The NM will shut down if it cannot connect to the RM within the specified max time period. |
|     If the value is set to -1, the NM will retry forever.</description> |
| <name>yarn.nodemanager.resourcemanager.connect.wait.secs</name> |
| <value>900</value> |
| </property> |
| |
| <property> |
| <description>Time interval, in seconds, between each NM attempt to connect to RM.</description> |
| <name>yarn.nodemanager.resourcemanager.connect.retry_interval.secs</name> |
| <value>30</value> |
| </property> |
| |
| <property> |
| <description>The minimum allowed version of a resourcemanager that a nodemanager will connect to. |
| The valid values are NONE (no version checking), EqualToNM (the resourcemanager's version is |
| equal to or greater than the NM version), or a Version String.</description> |
| <name>yarn.nodemanager.resourcemanager.minimum.version</name> |
| <value>NONE</value> |
| </property> |
| |
| <property> |
| <description>Max number of threads in NMClientAsync to process container |
| management events</description> |
| <name>yarn.client.nodemanager-client-async.thread-pool-max-size</name> |
| <value>500</value> |
| </property> |
| |
| <property> |
|     <description> |
|       Maximum number of proxy connections to cache for node managers. It |
|       should always be more than 1. NMClient and MRAppMaster will use this |
|       to cache connections with node managers. There will be at most one |
|       connection per node manager. For example, configuring it to a value |
|       of 5 ensures that the client will have at most 5 connections cached |
|       with 5 different node managers. These connections will be timed out |
|       if idle for more than the system-wide idle timeout period. If a token |
|       is used for authentication, it will be used only at connection-creation |
|       time; if a new token is received, the earlier connection should be |
|       closed in order to use the new token. This and |
|       yarn.client.nodemanager-client-async.thread-pool-max-size are related |
|       and should be kept in sync (no need for them to be equal). |
|     </description> |
| <name>yarn.client.max-nodemanagers-proxies</name> |
| <value>500</value> |
| </property> |
| |
| <property> |
|     <description>Enable the node manager to recover after starting.</description> |
| <name>yarn.nodemanager.recovery.enabled</name> |
| <value>false</value> |
| </property> |
| |
| <property> |
| <description>The local filesystem directory in which the node manager will |
| store state when recovery is enabled.</description> |
| <name>yarn.nodemanager.recovery.dir</name> |
| <value>${hadoop.tmp.dir}/yarn-nm-recovery</value> |
| </property> |
| |
| <!--Map Reduce configuration--> |
| <property> |
| <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name> |
| <value>org.apache.hadoop.mapred.ShuffleHandler</value> |
| </property> |
| |
| <property> |
| <name>mapreduce.job.jar</name> |
| <value/> |
| </property> |
| |
| <property> |
| <name>mapreduce.job.hdfs-servers</name> |
| <value>${fs.defaultFS}</value> |
| </property> |
| |
| <!-- WebAppProxy Configuration--> |
| |
| <property> |
| <description>The kerberos principal for the proxy, if the proxy is not |
| running as part of the RM.</description> |
| <name>yarn.web-proxy.principal</name> |
| <value/> |
| </property> |
| |
| <property> |
| <description>Keytab for WebAppProxy, if the proxy is not running as part of |
| the RM.</description> |
| <name>yarn.web-proxy.keytab</name> |
| </property> |
| |
| <property> |
|     <description>The address for the web proxy as HOST:PORT; if this is not |
|     given, the proxy will run as part of the RM.</description> |
| <name>yarn.web-proxy.address</name> |
| <value/> |
| </property> |
| |
| <!-- Applications' Configuration--> |
| |
| <property> |
|     <description> |
|       CLASSPATH for YARN applications. A comma-separated list |
|       of CLASSPATH entries. When this value is empty, the following default |
|       CLASSPATH for YARN applications is used. |
| For Linux: |
| $HADOOP_CONF_DIR, |
| $HADOOP_COMMON_HOME/share/hadoop/common/*, |
| $HADOOP_COMMON_HOME/share/hadoop/common/lib/*, |
| $HADOOP_HDFS_HOME/share/hadoop/hdfs/*, |
| $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*, |
| $HADOOP_YARN_HOME/share/hadoop/yarn/*, |
| $HADOOP_YARN_HOME/share/hadoop/yarn/lib/* |
| For Windows: |
| %HADOOP_CONF_DIR%, |
| %HADOOP_COMMON_HOME%/share/hadoop/common/*, |
| %HADOOP_COMMON_HOME%/share/hadoop/common/lib/*, |
| %HADOOP_HDFS_HOME%/share/hadoop/hdfs/*, |
| %HADOOP_HDFS_HOME%/share/hadoop/hdfs/lib/*, |
| %HADOOP_YARN_HOME%/share/hadoop/yarn/*, |
| %HADOOP_YARN_HOME%/share/hadoop/yarn/lib/* |
| </description> |
| <name>yarn.application.classpath</name> |
| <value></value> |
| </property> |
| |
| <!-- Timeline Service's Configuration--> |
| |
| <property> |
|     <description>Indicates to clients whether the timeline service is enabled. |
|     If enabled, clients will put entities and events to the timeline server. |
| </description> |
| <name>yarn.timeline-service.enabled</name> |
| <value>false</value> |
| </property> |
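| |
|   <!-- Illustrative sketch (the hostname is a placeholder): enabling the |
|        timeline service in yarn-site.xml: |
| |
|   <property> |
|     <name>yarn.timeline-service.enabled</name> |
|     <value>true</value> |
|   </property> |
|   <property> |
|     <name>yarn.timeline-service.hostname</name> |
|     <value>timeline.example.com</value> |
|   </property> |
|   --> |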
| |
| <property> |
| <description>The hostname of the timeline service web application.</description> |
| <name>yarn.timeline-service.hostname</name> |
| <value>0.0.0.0</value> |
| </property> |
| |
| <property> |
|     <description>The default address for the timeline server to start |
|     the RPC server.</description> |
| <name>yarn.timeline-service.address</name> |
| <value>${yarn.timeline-service.hostname}:10200</value> |
| </property> |
| |
| <property> |
| <description>The http address of the timeline service web application.</description> |
| <name>yarn.timeline-service.webapp.address</name> |
| <value>${yarn.timeline-service.hostname}:8188</value> |
| </property> |
| |
| <property> |
| <description>The https address of the timeline service web application.</description> |
| <name>yarn.timeline-service.webapp.https.address</name> |
| <value>${yarn.timeline-service.hostname}:8190</value> |
| </property> |
| |
| <property> |
|     <description> |
|       The actual address the server will bind to. If this optional address is |
|       set, the RPC and webapp servers will bind to this address and the port specified in |
|       yarn.timeline-service.address and yarn.timeline-service.webapp.address, respectively. |
|       This is most useful for making the service listen to all interfaces by setting it to |
|       0.0.0.0. |
|     </description> |
| <name>yarn.timeline-service.bind-host</name> |
| <value></value> |
| </property> |
| |
| <property> |
| <description>Store class name for timeline store.</description> |
| <name>yarn.timeline-service.store-class</name> |
| <value>org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore</value> |
| </property> |
| |
| <property> |
| <description>Enable age off of timeline store data.</description> |
| <name>yarn.timeline-service.ttl-enable</name> |
| <value>true</value> |
| </property> |
| |
| <property> |
| <description>Time to live for timeline store data in milliseconds.</description> |
| <name>yarn.timeline-service.ttl-ms</name> |
| <value>604800000</value> |
| </property> |
| |
| <property> |
| <description>Store file name for leveldb timeline store.</description> |
| <name>yarn.timeline-service.leveldb-timeline-store.path</name> |
| <value>${hadoop.tmp.dir}/yarn/timeline</value> |
| </property> |
| |
| <property> |
| <description>Length of time to wait between deletion cycles of leveldb timeline store in milliseconds.</description> |
| <name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name> |
| <value>300000</value> |
| </property> |
| |
| <property> |
| <description>Size of read cache for uncompressed blocks for leveldb timeline store in bytes.</description> |
| <name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name> |
| <value>104857600</value> |
| </property> |
| |
| <property> |
| <description>Size of cache for recently read entity start times for leveldb timeline store in number of entities.</description> |
| <name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name> |
| <value>10000</value> |
| </property> |
| |
| <property> |
| <description>Size of cache for recently written entity start times for leveldb timeline store in number of entities.</description> |
| <name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name> |
| <value>10000</value> |
| </property> |
| |
| <property> |
| <description>Handler thread count to serve the client RPC requests.</description> |
| <name>yarn.timeline-service.handler-thread-count</name> |
| <value>10</value> |
| </property> |
| |
| <property> |
| <name>yarn.timeline-service.http-authentication.type</name> |
| <value>simple</value> |
| <description> |
| Defines authentication used for the timeline server HTTP endpoint. |
| Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME# |
| </description> |
| </property> |
| |
| <property> |
| <name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name> |
| <value>true</value> |
| <description> |
| Indicates if anonymous requests are allowed by the timeline server when using |
| 'simple' authentication. |
| </description> |
| </property> |
| |
| <property> |
| <description>The Kerberos principal for the timeline server.</description> |
| <name>yarn.timeline-service.principal</name> |
| <value></value> |
| </property> |
| |
| <property> |
| <description>The Kerberos keytab for the timeline server.</description> |
| <name>yarn.timeline-service.keytab</name> |
| <value>/etc/krb5.keytab</value> |
| </property> |
| |
| <property> |
|     <description>Indicates to the ResourceManager as well as to clients whether |
|     the history service is enabled. If enabled, the ResourceManager starts |
|     recording historical data that the ApplicationHistory service can consume. |
| Similarly, clients can redirect to the history service when applications |
| finish if this is enabled.</description> |
| <name>yarn.timeline-service.generic-application-history.enabled</name> |
| <value>false</value> |
| </property> |
| |
| <property> |
| <description>URI pointing to the location of the FileSystem path where |
| the history will be persisted. This must be supplied when using |
| org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore |
| as the value for yarn.timeline-service.generic-application-history.store-class</description> |
| <name>yarn.timeline-service.generic-application-history.fs-history-store.uri</name> |
| <value>${hadoop.tmp.dir}/yarn/timeline/generic-history</value> |
| </property> |
| |
| <property> |
| <description>T-file compression types used to compress history data.</description> |
| <name>yarn.timeline-service.generic-application-history.fs-history-store.compression-type</name> |
| <value>none</value> |
| </property> |
| |
| <property> |
|     <description>Store class name for the history store, defaulting to the |
|     file system store.</description> |
| <name>yarn.timeline-service.generic-application-history.store-class</name> |
| <value>org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore</value> |
| </property> |
| |
| <!-- Other configuration --> |
| <property> |
| <description>The interval that the yarn client library uses to poll the |
| completion status of the asynchronous API of application client protocol. |
| </description> |
| <name>yarn.client.application-client-protocol.poll-interval-ms</name> |
| <value>200</value> |
| </property> |
| |
| <property> |
| <description>RSS usage of a process computed via |
| /proc/pid/stat is not very accurate as it includes shared pages of a |
| process. /proc/pid/smaps provides useful information like |
| Private_Dirty, Private_Clean, Shared_Dirty, Shared_Clean which can be used |
| for computing more accurate RSS. When this flag is enabled, RSS is computed |
| as Min(Shared_Dirty, Pss) + Private_Clean + Private_Dirty. It excludes |
| read-only shared mappings in RSS computation. |
| </description> |
| <name>yarn.nodemanager.container-monitor.procfs-tree.smaps-based-rss.enabled</name> |
| <value>false</value> |
| </property> |
| </configuration> |