| <?xml version="1.0"?> |
| <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> |
| |
| <!-- Do not modify this file directly. Instead, copy entries that you --> |
| <!-- wish to modify from this file into core-site.xml and change them --> |
| <!-- there. If core-site.xml does not already exist, create it. --> |
| |
| <configuration> |
| |
<!-- global properties -->
| |
| <property> |
| <name>hadoop.tmp.dir</name> |
| <value>/tmp/hadoop-${user.name}</value> |
| <description>A base for other temporary directories.</description> |
| </property> |
| |
| <property> |
| <name>hadoop.native.lib</name> |
| <value>true</value> |
  <description>Whether to use native Hadoop libraries, if present.</description>
| </property> |
| |
| <property> |
| <name>hadoop.http.filter.initializers</name> |
| <value></value> |
| <description>A comma separated list of class names. Each class in the list |
| must extend org.apache.hadoop.http.FilterInitializer. The corresponding |
| Filter will be initialized. Then, the Filter will be applied to all user |
| facing jsp and servlet web pages. The ordering of the list defines the |
| ordering of the filters.</description> |
| </property> |
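
<!-- Illustrative example (goes in core-site.xml, not here): wiring in the
     HTTP authentication filter. The class below ships with Hadoop's HTTP
     SPNEGO support; any class extending FilterInitializer would work.
<property>
  <name>hadoop.http.filter.initializers</name>
  <value>org.apache.hadoop.security.AuthenticationFilterInitializer</value>
</property>
-->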
| |
| <property> |
| <name>hadoop.security.group.mapping</name> |
| <value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value> |
  <description>Class for user-to-group mapping (gets the groups for a given user).
  </description>
| </property> |
| |
| <property> |
| <name>hadoop.security.authorization</name> |
| <value>false</value> |
| <description>Is service-level authorization enabled?</description> |
| </property> |
| |
| <property> |
| <name>hadoop.security.instrumentation.requires.admin</name> |
| <value>false</value> |
| <description> |
| Indicates if administrator ACLs are required to access |
| instrumentation servlets (JMX, METRICS, CONF, STACKS). |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.security.authentication</name> |
| <value>simple</value> |
  <description>Possible values are simple (no authentication) and kerberos.
  </description>
| </property> |
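
<!-- Illustrative example (for core-site.xml): turning on Kerberos
     authentication together with service-level authorization. Whether both
     are wanted is a deployment decision; this is only a sketch.
<property>
  <name>hadoop.security.authentication</name>
  <value>kerberos</value>
</property>
<property>
  <name>hadoop.security.authorization</name>
  <value>true</value>
</property>
-->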
| |
| <property> |
| <name>hadoop.security.token.service.use_ip</name> |
| <value>true</value> |
| <description>Controls whether tokens always use IP addresses. DNS changes |
| will not be detected if this option is enabled. Existing client connections |
| that break will always reconnect to the IP of the original host. New clients |
| will connect to the host's new IP but fail to locate a token. Disabling |
| this option will allow existing and new clients to detect an IP change and |
| continue to locate the new host's token. |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.security.use-weak-http-crypto</name> |
| <value>false</value> |
| <description>If enabled, use KSSL to authenticate HTTP connections to the |
| NameNode. Due to a bug in JDK6, using KSSL requires one to configure |
| Kerberos tickets to use encryption types that are known to be |
| cryptographically weak. If disabled, SPNEGO will be used for HTTP |
| authentication, which supports stronger encryption types. |
| </description> |
| </property> |
| |
| <!-- |
| <property> |
| <name>hadoop.security.service.user.name.key</name> |
| <value></value> |
| <description>Name of the kerberos principal of the user that owns |
| a given service daemon |
| </description> |
| </property> |
| --> |
| |
<!-- logging properties -->
| |
| <property> |
| <name>hadoop.logfile.size</name> |
| <value>10000000</value> |
  <description>The maximum size of each log file, in bytes.</description>
| </property> |
| |
| <property> |
| <name>hadoop.logfile.count</name> |
| <value>10</value> |
  <description>The maximum number of log files.</description>
| </property> |
| |
| <!-- i/o properties --> |
| <property> |
| <name>io.file.buffer.size</name> |
| <value>4096</value> |
  <description>The size of the buffer used in sequence files.
| The size of this buffer should probably be a multiple of hardware |
| page size (4096 on Intel x86), and it determines how much data is |
| buffered during read and write operations.</description> |
| </property> |
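
<!-- Illustrative example (for core-site.xml): raising the buffer to 64 KB,
     since 65536 is a multiple of the 4096-byte hardware page size. The
     exact value is an assumption chosen for illustration, not a
     recommendation from this file.
<property>
  <name>io.file.buffer.size</name>
  <value>65536</value>
</property>
-->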
| |
| <property> |
| <name>io.bytes.per.checksum</name> |
| <value>512</value> |
| <description>The number of bytes per checksum. Must not be larger than |
| io.file.buffer.size.</description> |
| </property> |
| |
| <property> |
| <name>io.skip.checksum.errors</name> |
| <value>false</value> |
| <description>If true, when a checksum error is encountered while |
| reading a sequence file, entries are skipped, instead of throwing an |
| exception.</description> |
| </property> |
| |
| <property> |
| <name>io.compression.codecs</name> |
| <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec</value> |
| <description>A list of the compression codec classes that can be used |
| for compression/decompression.</description> |
| </property> |
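
<!-- Illustrative example (for core-site.xml): appending a third-party codec
     such as LZO to the default list. The LzoCodec class is not part of
     Hadoop; it comes from the separate hadoop-lzo project and must be on
     the classpath.
<property>
  <name>io.compression.codecs</name>
  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec,com.hadoop.compression.lzo.LzoCodec</value>
</property>
-->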
| |
| <property> |
| <name>io.serializations</name> |
| <value>org.apache.hadoop.io.serializer.WritableSerialization</value> |
| <description>A list of serialization classes that can be used for |
| obtaining serializers and deserializers.</description> |
| </property> |
| |
| <!-- file system properties --> |
| |
| <property> |
| <name>fs.default.name</name> |
| <value>file:///</value> |
| <description>The name of the default file system. A URI whose |
| scheme and authority determine the FileSystem implementation. The |
  URI's scheme determines the config property (fs.SCHEME.impl) naming
  the FileSystem implementation class. The URI's authority is used to
| determine the host, port, etc. for a filesystem.</description> |
| </property> |
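
<!-- Illustrative example (for core-site.xml): pointing the default
     filesystem at an HDFS namenode. The hdfs scheme selects fs.hdfs.impl,
     and the authority supplies the namenode host and port. The hostname
     and port below are placeholder assumptions.
<property>
  <name>fs.default.name</name>
  <value>hdfs://namenode.example.com:8020</value>
</property>
-->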
| |
| <property> |
| <name>fs.trash.interval</name> |
| <value>0</value> |
| <description>Number of minutes between trash checkpoints. |
| If zero, the trash feature is disabled. |
| </description> |
| </property> |
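
<!-- Illustrative example (for core-site.xml): enabling the trash feature
     with checkpoints every 24 hours (1440 minutes). The interval is an
     assumption chosen for illustration.
<property>
  <name>fs.trash.interval</name>
  <value>1440</value>
</property>
-->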
| |
| <property> |
| <name>fs.file.impl</name> |
| <value>org.apache.hadoop.fs.LocalFileSystem</value> |
| <description>The FileSystem for file: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.hdfs.impl</name> |
| <value>org.apache.hadoop.hdfs.DistributedFileSystem</value> |
| <description>The FileSystem for hdfs: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.s3.impl</name> |
| <value>org.apache.hadoop.fs.s3.S3FileSystem</value> |
| <description>The FileSystem for s3: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.s3n.impl</name> |
| <value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value> |
| <description>The FileSystem for s3n: (Native S3) uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.kfs.impl</name> |
| <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value> |
| <description>The FileSystem for kfs: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.hftp.impl</name> |
| <value>org.apache.hadoop.hdfs.HftpFileSystem</value> |
| </property> |
| |
| <property> |
| <name>fs.hsftp.impl</name> |
| <value>org.apache.hadoop.hdfs.HsftpFileSystem</value> |
| </property> |
| |
| <property> |
| <name>fs.webhdfs.impl</name> |
| <value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value> |
| </property> |
| |
| <property> |
| <name>fs.ftp.impl</name> |
| <value>org.apache.hadoop.fs.ftp.FTPFileSystem</value> |
| <description>The FileSystem for ftp: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.ramfs.impl</name> |
| <value>org.apache.hadoop.fs.InMemoryFileSystem</value> |
| <description>The FileSystem for ramfs: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.har.impl</name> |
| <value>org.apache.hadoop.fs.HarFileSystem</value> |
| <description>The filesystem for Hadoop archives. </description> |
| </property> |
| |
| <property> |
| <name>fs.har.impl.disable.cache</name> |
| <value>true</value> |
| <description>Don't cache 'har' filesystem instances.</description> |
| </property> |
| |
| <property> |
| <name>fs.checkpoint.dir</name> |
| <value>${hadoop.tmp.dir}/dfs/namesecondary</value> |
| <description>Determines where on the local filesystem the DFS secondary |
| name node should store the temporary images to merge. |
| If this is a comma-delimited list of directories then the image is |
| replicated in all of the directories for redundancy. |
| </description> |
| </property> |
| |
| <property> |
| <name>fs.checkpoint.edits.dir</name> |
| <value>${fs.checkpoint.dir}</value> |
| <description>Determines where on the local filesystem the DFS secondary |
| name node should store the temporary edits to merge. |
      If this is a comma-delimited list of directories then the edits are
      replicated in all of the directories for redundancy.
      The default value is the same as fs.checkpoint.dir.
| </description> |
| </property> |
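
<!-- Illustrative example (for core-site.xml): replicating the secondary
     namenode's checkpoint images across two local disks for redundancy.
     The paths are placeholder assumptions; fs.checkpoint.edits.dir follows
     along because it defaults to fs.checkpoint.dir.
<property>
  <name>fs.checkpoint.dir</name>
  <value>/data/1/dfs/namesecondary,/data/2/dfs/namesecondary</value>
</property>
-->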
| |
| <property> |
| <name>fs.checkpoint.period</name> |
| <value>3600</value> |
| <description>The number of seconds between two periodic checkpoints. |
| </description> |
| </property> |
| |
| <property> |
| <name>fs.checkpoint.size</name> |
| <value>67108864</value> |
| <description>The size of the current edit log (in bytes) that triggers |
| a periodic checkpoint even if the fs.checkpoint.period hasn't expired. |
| </description> |
| </property> |
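
<!-- Illustrative example (for core-site.xml): checkpointing every 30
     minutes, or sooner if the edit log reaches 128 MB. Both values are
     assumptions chosen for illustration.
<property>
  <name>fs.checkpoint.period</name>
  <value>1800</value>
</property>
<property>
  <name>fs.checkpoint.size</name>
  <value>134217728</value>
</property>
-->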
| |
| |
| |
| <property> |
| <name>fs.s3.block.size</name> |
| <value>67108864</value> |
| <description>Block size to use when writing files to S3.</description> |
| </property> |
| |
| <property> |
| <name>fs.s3.buffer.dir</name> |
| <value>${hadoop.tmp.dir}/s3</value> |
| <description>Determines where on the local filesystem the S3 filesystem |
| should store files before sending them to S3 |
| (or after retrieving them from S3). |
| </description> |
| </property> |
| |
| <property> |
| <name>fs.s3.maxRetries</name> |
| <value>4</value> |
| <description>The maximum number of retries for reading or writing files to S3, |
| before we signal failure to the application. |
| </description> |
| </property> |
| |
| <property> |
| <name>fs.s3.sleepTimeSeconds</name> |
| <value>10</value> |
| <description>The number of seconds to sleep between each S3 retry. |
| </description> |
| </property> |
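
<!-- Illustrative example (for core-site.xml): doubling the S3 retry budget
     for an unreliable network. Note that using the s3: scheme also
     requires AWS credentials, e.g. via the fs.s3.awsAccessKeyId and
     fs.s3.awsSecretAccessKey properties. All values here are assumptions.
<property>
  <name>fs.s3.maxRetries</name>
  <value>8</value>
</property>
<property>
  <name>fs.s3.sleepTimeSeconds</name>
  <value>20</value>
</property>
-->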
| |
| |
| <property> |
| <name>local.cache.size</name> |
| <value>10737418240</value> |
  <description>The limit on the size of the cache to keep, set by default
  to 10 GB. This acts as a soft limit on the cache directory for out-of-band data.
| </description> |
| </property> |
| |
| <property> |
| <name>io.seqfile.compress.blocksize</name> |
| <value>1000000</value> |
| <description>The minimum block size for compression in block compressed |
| SequenceFiles. |
| </description> |
| </property> |
| |
| <property> |
| <name>io.seqfile.lazydecompress</name> |
| <value>true</value> |
  <description>Whether values of block-compressed SequenceFiles should be
  decompressed only when necessary.
| </description> |
| </property> |
| |
| <property> |
| <name>io.seqfile.sorter.recordlimit</name> |
| <value>1000000</value> |
  <description>The limit on the number of records to be kept in memory in a
  spill in SequenceFiles.Sorter.
| </description> |
| </property> |
| |
| <property> |
| <name>io.mapfile.bloom.size</name> |
| <value>1048576</value> |
  <description>The size of the BloomFilters used in BloomMapFile. Each time this
  many keys have been appended, the next BloomFilter is created (inside a
  DynamicBloomFilter). Larger values minimize the number of filters, which
  slightly improves performance, but may waste too much space if the total
  number of keys is usually much smaller than this number.
| </description> |
| </property> |
| |
| <property> |
| <name>io.mapfile.bloom.error.rate</name> |
| <value>0.005</value> |
  <description>The rate of false positives in the BloomFilters used in
  BloomMapFile. As this value decreases, the size of the BloomFilters increases
  exponentially. This value is the probability of encountering false positives
  (the default is 0.5%).
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.util.hash.type</name> |
| <value>murmur</value> |
  <description>The default implementation of Hash. Currently it can take one of
  two values: 'murmur' to select MurmurHash and 'jenkins' to select JenkinsHash.
| </description> |
| </property> |
| |
| |
| <!-- ipc properties --> |
| |
| <property> |
| <name>ipc.client.idlethreshold</name> |
| <value>4000</value> |
| <description>Defines the threshold number of connections after which |
| connections will be inspected for idleness. |
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.client.kill.max</name> |
| <value>10</value> |
| <description>Defines the maximum number of clients to disconnect in one go. |
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.client.connection.maxidletime</name> |
| <value>10000</value> |
  <description>The maximum time, in milliseconds, after which a client will
  bring down the connection to the server.
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.client.connect.max.retries</name> |
| <value>10</value> |
| <description>Indicates the number of retries a client will make to establish |
| a server connection. |
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.server.listen.queue.size</name> |
| <value>128</value> |
| <description>Indicates the length of the listen queue for servers accepting |
| client connections. |
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.server.tcpnodelay</name> |
| <value>false</value> |
  <description>Turn Nagle's algorithm on or off for the TCP socket connection on
  the server. Setting this to true disables the algorithm and may decrease
  latency at the cost of more, smaller packets.
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.client.tcpnodelay</name> |
| <value>false</value> |
  <description>Turn Nagle's algorithm on or off for the TCP socket connection on
  the client. Setting this to true disables the algorithm and may decrease
  latency at the cost of more, smaller packets.
| </description> |
| </property> |
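
<!-- Illustrative example (for core-site.xml): disabling Nagle's algorithm
     on both ends for a latency-sensitive cluster, trading more small
     packets for lower RPC latency. Whether this helps is workload
     dependent; treat it as a sketch, not a recommendation.
<property>
  <name>ipc.server.tcpnodelay</name>
  <value>true</value>
</property>
<property>
  <name>ipc.client.tcpnodelay</name>
  <value>true</value>
</property>
-->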
| |
| |
| <!-- Web Interface Configuration --> |
| |
| <property> |
| <name>webinterface.private.actions</name> |
| <value>false</value> |
  <description> If set to true, the web interfaces of the JobTracker and
    NameNode may contain actions, such as kill job and delete file, that
    should not be exposed to the public. Enable this option only if the
    interfaces are reachable solely by those who have the right authorization.
| </description> |
| </property> |
| |
| <!-- Proxy Configuration --> |
| |
| <property> |
| <name>hadoop.rpc.socket.factory.class.default</name> |
| <value>org.apache.hadoop.net.StandardSocketFactory</value> |
| <description> Default SocketFactory to use. This parameter is expected to be |
| formatted as "package.FactoryClassName". |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.rpc.socket.factory.class.ClientProtocol</name> |
| <value></value> |
  <description> Default SocketFactory to use to connect to a DFS. If null or
    empty, use hadoop.rpc.socket.factory.class.default. This socket factory is also used by
| DFSClient to create sockets to DataNodes. |
| </description> |
| </property> |
| |
| |
| |
| <property> |
| <name>hadoop.socks.server</name> |
| <value></value> |
| <description> Address (host:port) of the SOCKS server to be used by the |
| SocksSocketFactory. |
| </description> |
| </property> |
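
<!-- Illustrative example (for core-site.xml): routing client RPCs through
     a SOCKS proxy by swapping in Hadoop's SocksSocketFactory. The proxy
     address is a placeholder assumption.
<property>
  <name>hadoop.rpc.socket.factory.class.default</name>
  <value>org.apache.hadoop.net.SocksSocketFactory</value>
</property>
<property>
  <name>hadoop.socks.server</name>
  <value>socks-gateway.example.com:1080</value>
</property>
-->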
| |
| |
| <!-- Topology Configuration --> |
| |
| <property> |
| <name>topology.node.switch.mapping.impl</name> |
| <value>org.apache.hadoop.net.ScriptBasedMapping</value> |
| <description> The default implementation of the DNSToSwitchMapping. It |
| invokes a script specified in topology.script.file.name to resolve |
| node names. If the value for topology.script.file.name is not set, the |
| default value of DEFAULT_RACK is returned for all node names. |
| </description> |
| </property> |
| |
| <property> |
| <name>net.topology.impl</name> |
| <value>org.apache.hadoop.net.NetworkTopology</value> |
  <description> The default implementation of NetworkTopology, which is the
    classic three-layer one.
  </description>
| </property> |
| |
| <property> |
| <name>topology.script.file.name</name> |
| <value></value> |
| <description> The script name that should be invoked to resolve DNS names to |
| NetworkTopology names. Example: the script would take host.foo.bar as an |
| argument, and return /rack1 as the output. |
| </description> |
| </property> |
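
<!-- Illustrative example (for core-site.xml): pointing rack resolution at a
     local script. The path is a placeholder assumption; the script must
     accept up to topology.script.number.args addresses per invocation and
     print one rack name (e.g. /rack1) per input.
<property>
  <name>topology.script.file.name</name>
  <value>/etc/hadoop/topology.sh</value>
</property>
-->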
| |
| <property> |
| <name>topology.script.number.args</name> |
| <value>100</value> |
  <description> The maximum number of arguments that the script configured with
    topology.script.file.name should be run with. Each argument is an
    IP address.
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.security.uid.cache.secs</name> |
| <value>14400</value> |
  <description> NativeIO maintains a cache from UID to UserName. This is
  the timeout, in seconds, for an entry in that cache. </description>
| </property> |
| |
| <!-- HTTP web-consoles Authentication --> |
| |
| <property> |
| <name>hadoop.http.authentication.type</name> |
| <value>simple</value> |
| <description> |
    Defines authentication used for the Hadoop HTTP web-consoles.
| Supported values are: simple | kerberos | #AUTHENTICATION_HANDLER_CLASSNAME# |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.http.authentication.token.validity</name> |
| <value>36000</value> |
| <description> |
| Indicates how long (in seconds) an authentication token is valid before it has |
| to be renewed. |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.http.authentication.signature.secret.file</name> |
| <value>${user.home}/hadoop-http-auth-signature-secret</value> |
| <description> |
| The signature secret for signing the authentication tokens. |
    If not set, a random secret is generated at startup time.
    The same secret should be used for the JT/NN/DN/TT configurations.
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.http.authentication.cookie.domain</name> |
| <value></value> |
| <description> |
| The domain to use for the HTTP cookie that stores the authentication token. |
    For authentication to work correctly across the web-consoles of all Hadoop
    nodes, the domain must be correctly set.
    IMPORTANT: when using IP addresses, browsers ignore cookies with domain settings.
    For this setting to work properly, all nodes in the cluster must be configured
    to generate URLs with hostname.domain names in them.
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.http.authentication.simple.anonymous.allowed</name> |
| <value>true</value> |
| <description> |
| Indicates if anonymous requests are allowed when using 'simple' authentication. |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.http.authentication.kerberos.principal</name> |
| <value>HTTP/localhost@LOCALHOST</value> |
| <description> |
    Indicates the Kerberos principal to be used for the HTTP endpoint.
    The principal MUST start with 'HTTP/' as per the Kerberos HTTP SPNEGO specification.
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.http.authentication.kerberos.keytab</name> |
| <value>${user.home}/hadoop.keytab</value> |
| <description> |
    Location of the keytab file with the credentials for the principal.
| </description> |
| </property> |
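
<!-- Illustrative example (for core-site.xml): switching the web-consoles to
     Kerberos SPNEGO. The principal's realm, hostname, and keytab path are
     placeholder assumptions for the sketch.
<property>
  <name>hadoop.http.authentication.type</name>
  <value>kerberos</value>
</property>
<property>
  <name>hadoop.http.authentication.kerberos.principal</name>
  <value>HTTP/host.example.com@EXAMPLE.COM</value>
</property>
<property>
  <name>hadoop.http.authentication.kerberos.keytab</name>
  <value>/etc/hadoop/hadoop.keytab</value>
</property>
-->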
| |
| <property> |
| <name>hadoop.relaxed.worker.version.check</name> |
| <value>false</value> |
| <description> |
    By default, datanodes refuse to connect to namenodes if their build
    revision (svn revision) does not match, and tasktrackers refuse to
    connect to jobtrackers if their build version (version, revision,
    user, and source checksum) does not match. This option changes the
    behavior of Hadoop workers to check only for a version match (e.g.
    "1.0.2") and ignore the other build fields (revision, user, and
    source checksum).
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.skip.worker.version.check</name> |
| <value>false</value> |
| <description> |
    By default, datanodes refuse to connect to namenodes if their build
    revision (svn revision) does not match, and tasktrackers refuse to
    connect to jobtrackers if their build version (version, revision,
    user, and source checksum) does not match. This option changes the
    behavior of Hadoop workers to skip the version check entirely.
| This option supersedes the 'hadoop.relaxed.worker.version.check' |
| option. |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.jetty.logs.serve.aliases</name> |
| <value>true</value> |
| <description> |
    Enable or disable serving aliases from Jetty.
| </description> |
| </property> |
| |
| |
| <property> |
| <name>hadoop.user.group.static.mapping.overrides</name> |
| <value>dr.who=;</value> |
| <description> |
    Static mapping of users to groups. This will override the groups, if
    available in the system, for the specified users. In other words, group
    look-up will not happen for these users; instead, the groups mapped in
    this configuration will be used.
    The mapping should be in this format:
    user1=group1,group2;user2=;user3=group2;
    The default, "dr.who=;", treats "dr.who" as a user without groups.
| </description> |
| </property> |
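
<!-- Illustrative example (for core-site.xml): keeping the groupless dr.who
     default while pinning a hypothetical user "alice" to two groups, so no
     system look-up happens for either user. The user and group names are
     assumptions.
<property>
  <name>hadoop.user.group.static.mapping.overrides</name>
  <value>dr.who=;alice=staff,hadoop;</value>
</property>
-->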
| </configuration> |