| <?xml version="1.0"?> |
| <?xml-stylesheet type="text/xsl" href="configuration.xsl"?> |
| |
| <!-- Do not modify this file directly. Instead, copy entries that you --> |
| <!-- wish to modify from this file into hadoop-site.xml and change them --> |
| <!-- there. If hadoop-site.xml does not already exist, create it. --> |
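| |
| <!-- As a sketch, a minimal hadoop-site.xml that overrides a single entry |
| from this file (the path shown is only an example) could look like: |
| |
| <?xml version="1.0"?> |
| <configuration> |
| <property> |
| <name>hadoop.tmp.dir</name> |
| <value>/var/hadoop/tmp</value> |
| </property> |
| </configuration> |
| -->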
| |
| <configuration> |
| |
| <!-- global properties --> |
| |
| <property> |
| <name>hadoop.tmp.dir</name> |
| <value>/tmp/hadoop-${user.name}</value> |
| <description>A base for other temporary directories.</description> |
| </property> |
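| |
| <!-- ${user.name} is substituted from the Java system property of that |
| name, so for a hypothetical user "alice" the default above expands to |
| /tmp/hadoop-alice. --> |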
| |
| <property> |
| <name>hadoop.native.lib</name> |
| <value>true</value> |
| <description>Should native hadoop libraries, if present, be used?</description> |
| </property> |
| |
| <!-- logging properties --> |
| |
| <property> |
| <name>hadoop.logfile.size</name> |
| <value>10000000</value> |
| <description>The maximum size of each log file, in bytes.</description> |
| </property> |
| |
| <property> |
| <name>hadoop.logfile.count</name> |
| <value>10</value> |
| <description>The maximum number of log files.</description> |
| </property> |
| |
| <property> |
| <name>dfs.namenode.logging.level</name> |
| <value>info</value> |
| <description>The logging level for dfs namenode. Other values are "dir" |
| (trace namespace mutations), "block" (trace block under/over replications |
| and block creations/deletions), or "all".</description> |
| </property> |
| |
| <!-- i/o properties --> |
| |
| <property> |
| <name>io.sort.factor</name> |
| <value>10</value> |
| <description>The number of streams to merge at once while sorting |
| files. This determines the number of open file handles.</description> |
| </property> |
| |
| <property> |
| <name>io.sort.mb</name> |
| <value>100</value> |
| <description>The total amount of buffer memory to use while sorting |
| files, in megabytes. By default, gives each merge stream 1MB, which |
| should minimize seeks.</description> |
| </property> |
| |
| <property> |
| <name>io.file.buffer.size</name> |
| <value>4096</value> |
| <description>The size of buffer for use in sequence files. |
| The size of this buffer should probably be a multiple of hardware |
| page size (4096 on Intel x86), and it determines how much data is |
| buffered during read and write operations.</description> |
| </property> |
| |
| <property> |
| <name>io.bytes.per.checksum</name> |
| <value>512</value> |
| <description>The number of bytes per checksum. Must not be larger than |
| io.file.buffer.size.</description> |
| </property> |
| |
| <property> |
| <name>io.skip.checksum.errors</name> |
| <value>false</value> |
| <description>If true, when a checksum error is encountered while |
| reading a sequence file, entries are skipped, instead of throwing an |
| exception.</description> |
| </property> |
| |
| <property> |
| <name>io.map.index.skip</name> |
| <value>0</value> |
| <description>Number of index entries to skip between each entry. |
| Zero by default. Setting this to values larger than zero can |
| facilitate opening large map files using less memory.</description> |
| </property> |
| |
| <property> |
| <name>io.compression.codecs</name> |
| <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec</value> |
| <description>A list of the compression codec classes that can be used |
| for compression/decompression.</description> |
| </property> |
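| |
| <!-- To register an additional codec, append its class to this list in |
| hadoop-site.xml. A sketch, assuming an LZO codec is present in your build: |
| |
| <property> |
| <name>io.compression.codecs</name> |
| <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.LzoCodec</value> |
| </property> |
| -->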
| |
| <!-- file system properties --> |
| |
| <property> |
| <name>fs.default.name</name> |
| <value>file:///</value> |
| <description>The name of the default file system. A URI whose |
| scheme and authority determine the FileSystem implementation. The |
| URI's scheme determines the config property (fs.SCHEME.impl) naming |
| the FileSystem implementation class. The URI's authority is used to |
| determine the host, port, etc. for a filesystem.</description> |
| </property> |
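| |
| <!-- A sketch for a real cluster (host and port are hypothetical): point |
| the default filesystem at the namenode so paths resolve to HDFS: |
| |
| <property> |
| <name>fs.default.name</name> |
| <value>hdfs://namenode.example.com:9000/</value> |
| </property> |
| -->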
| |
| <property> |
| <name>fs.trash.root</name> |
| <value>${hadoop.tmp.dir}/Trash</value> |
| <description>The trash directory, used by FsShell's 'rm' command. |
| </description> |
| </property> |
| |
| <property> |
| <name>fs.trash.interval</name> |
| <value>0</value> |
| <description>Number of minutes between trash checkpoints. |
| If zero, the trash feature is disabled. |
| </description> |
| </property> |
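| |
| <!-- For example, to enable the trash feature with a checkpoint every 24 |
| hours, set the interval to 24 * 60 = 1440 minutes in hadoop-site.xml: |
| |
| <property> |
| <name>fs.trash.interval</name> |
| <value>1440</value> |
| </property> |
| -->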
| |
| <property> |
| <name>fs.file.impl</name> |
| <value>org.apache.hadoop.fs.LocalFileSystem</value> |
| <description>The FileSystem for file: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.hdfs.impl</name> |
| <value>org.apache.hadoop.dfs.DistributedFileSystem</value> |
| <description>The FileSystem for hdfs: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.s3.impl</name> |
| <value>org.apache.hadoop.fs.s3.S3FileSystem</value> |
| <description>The FileSystem for s3: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.kfs.impl</name> |
| <value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value> |
| <description>The FileSystem for kfs: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.hftp.impl</name> |
| <value>org.apache.hadoop.dfs.HftpFileSystem</value> |
| <description>The FileSystem for hftp: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.ramfs.impl</name> |
| <value>org.apache.hadoop.fs.InMemoryFileSystem</value> |
| <description>The FileSystem for ramfs: uris.</description> |
| </property> |
| |
| <property> |
| <name>fs.inmemory.size.mb</name> |
| <value>75</value> |
| <description>The size of the in-memory filesystem instance in MB.</description> |
| </property> |
| |
| <property> |
| <name>fs.checkpoint.dir</name> |
| <value>${hadoop.tmp.dir}/dfs/namesecondary</value> |
| <description>Determines where on the local filesystem the DFS secondary |
| name node should store the temporary images and edits to merge. |
| </description> |
| </property> |
| |
| <property> |
| <name>fs.checkpoint.period</name> |
| <value>3600</value> |
| <description>The number of seconds between two periodic checkpoints. |
| </description> |
| </property> |
| |
| <property> |
| <name>fs.checkpoint.size</name> |
| <value>67108864</value> |
| <description>The size of the current edit log (in bytes) that triggers |
| a periodic checkpoint even if the fs.checkpoint.period hasn't expired. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.secondary.info.port</name> |
| <value>50090</value> |
| <description>The base number for the Secondary namenode info port. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.secondary.info.bindAddress</name> |
| <value>0.0.0.0</value> |
| <description> |
| The address on which the secondary namenode web UI will listen. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.datanode.bindAddress</name> |
| <value>0.0.0.0</value> |
| <description> |
| The address on which the datanode will listen. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.datanode.port</name> |
| <value>50010</value> |
| <description>The port number that the dfs datanode server uses as a starting |
| point to look for a free port to listen on. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.info.bindAddress</name> |
| <value>0.0.0.0</value> |
| <description> |
| The address on which the dfs namenode web UI will listen. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.info.port</name> |
| <value>50070</value> |
| <description>The base port number for the dfs namenode web ui. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.datanode.dns.interface</name> |
| <value>default</value> |
| <description>The name of the Network Interface from which a data node should |
| report its IP address. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.datanode.dns.nameserver</name> |
| <value>default</value> |
| <description>The host name or IP address of the name server (DNS) |
| which a DataNode should use to determine the host name used by the |
| NameNode for communication and display purposes. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.replication.considerLoad</name> |
| <value>true</value> |
| <description>Decides whether chooseTarget considers the target's load when placing replicas. |
| </description> |
| </property> |
| <property> |
| <name>dfs.default.chunk.view.size</name> |
| <value>32768</value> |
| <description>The number of bytes of a file to display in the browser. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.datanode.du.reserved</name> |
| <value>0</value> |
| <description>Reserved space in bytes per volume. Always leave this much space free for non-dfs use. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.datanode.du.pct</name> |
| <value>0.98f</value> |
| <description>When calculating remaining space, only use this percentage of the real available space. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.name.dir</name> |
| <value>${hadoop.tmp.dir}/dfs/name</value> |
| <description>Determines where on the local filesystem the DFS name node |
| should store the name table. If this is a comma-delimited list |
| of directories then the name table is replicated in all of the |
| directories, for redundancy. </description> |
| </property> |
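| |
| <!-- A sketch of redundant name storage (paths are hypothetical); the name |
| table is written to both directories, ideally on separate devices: |
| |
| <property> |
| <name>dfs.name.dir</name> |
| <value>/disk1/dfs/name,/disk2/dfs/name</value> |
| </property> |
| -->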
| |
| <property> |
| <name>dfs.client.buffer.dir</name> |
| <value>${hadoop.tmp.dir}/dfs/tmp</value> |
| <description>Determines where on the local filesystem a DFS client |
| should store its blocks before it sends them to the datanode. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.data.dir</name> |
| <value>${hadoop.tmp.dir}/dfs/data</value> |
| <description>Determines where on the local filesystem a DFS data node |
| should store its blocks. If this is a comma-delimited |
| list of directories, then data will be stored in all named |
| directories, typically on different devices. |
| Directories that do not exist are ignored. |
| </description> |
| </property> |
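| |
| <!-- A sketch spreading block storage over two devices (paths are |
| hypothetical); directories that do not exist are simply ignored: |
| |
| <property> |
| <name>dfs.data.dir</name> |
| <value>/disk1/dfs/data,/disk2/dfs/data</value> |
| </property> |
| -->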
| |
| <property> |
| <name>dfs.replication</name> |
| <value>3</value> |
| <description>Default block replication. |
| The actual number of replications can be specified when the file is created. |
| The default is used if replication is not specified at create time. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.replication.max</name> |
| <value>512</value> |
| <description>Maximum block replication. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.replication.min</name> |
| <value>1</value> |
| <description>Minimum block replication. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.block.size</name> |
| <value>67108864</value> |
| <description>The default block size for new files.</description> |
| </property> |
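| |
| <!-- The default above is 64MB (64 * 1024 * 1024 = 67108864 bytes). To |
| double it to 128MB for large sequential workloads, one might set: |
| |
| <property> |
| <name>dfs.block.size</name> |
| <value>134217728</value> |
| </property> |
| -->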
| |
| <property> |
| <name>dfs.df.interval</name> |
| <value>60000</value> |
| <description>Disk usage statistics refresh interval in msec.</description> |
| </property> |
| |
| <property> |
| <name>dfs.client.block.write.retries</name> |
| <value>3</value> |
| <description>The number of retries for writing blocks to the data nodes, |
| before we signal failure to the application. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.blockreport.intervalMsec</name> |
| <value>3600000</value> |
| <description>Determines block reporting interval in milliseconds.</description> |
| </property> |
| |
| <property> |
| <name>dfs.heartbeat.interval</name> |
| <value>3</value> |
| <description>Determines datanode heartbeat interval in seconds.</description> |
| </property> |
| |
| <property> |
| <name>dfs.namenode.handler.count</name> |
| <value>10</value> |
| <description>The number of server threads for the namenode.</description> |
| </property> |
| |
| <property> |
| <name>dfs.safemode.threshold.pct</name> |
| <value>0.999f</value> |
| <description> |
| Specifies the percentage of blocks that should satisfy |
| the minimal replication requirement defined by dfs.replication.min. |
| Values less than or equal to 0 mean not to start in safe mode. |
| Values greater than 1 will make safe mode permanent. |
| </description> |
| </property> |
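| |
| <!-- Per the description above, a non-positive value skips safe mode |
| entirely on startup, e.g.: |
| |
| <property> |
| <name>dfs.safemode.threshold.pct</name> |
| <value>0</value> |
| </property> |
| -->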
| |
| <property> |
| <name>dfs.safemode.extension</name> |
| <value>30000</value> |
| <description> |
| Determines extension of safe mode in milliseconds |
| after the threshold level is reached. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.network.script</name> |
| <value></value> |
| <description> |
| Specifies a script that prints the network location path |
| of the current machine. |
| </description> |
| </property> |
| |
| <property> |
| <name>dfs.hosts</name> |
| <value></value> |
| <description>Names a file that contains a list of hosts that are |
| permitted to connect to the namenode. The full pathname of the file |
| must be specified. If the value is empty, all hosts are |
| permitted.</description> |
| </property> |
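| |
| <!-- A sketch (path and hostnames are hypothetical). The file is a plain |
| list of host names, one per line: |
| |
| <property> |
| <name>dfs.hosts</name> |
| <value>/etc/hadoop/dfs.hosts.allow</value> |
| </property> |
| |
| where /etc/hadoop/dfs.hosts.allow contains lines such as: |
| datanode1.example.com |
| datanode2.example.com |
| -->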
| |
| <property> |
| <name>dfs.hosts.exclude</name> |
| <value></value> |
| <description>Names a file that contains a list of hosts that are |
| not permitted to connect to the namenode. The full pathname of the |
| file must be specified. If the value is empty, no hosts are |
| excluded.</description> |
| </property> |
| |
| <property> |
| <name>fs.s3.block.size</name> |
| <value>67108864</value> |
| <description>Block size to use when writing files to S3.</description> |
| </property> |
| |
| <property> |
| <name>fs.s3.buffer.dir</name> |
| <value>${hadoop.tmp.dir}/s3</value> |
| <description>Determines where on the local filesystem the S3 filesystem |
| should store its blocks before it sends them to S3 |
| or after it retrieves them from S3. |
| </description> |
| </property> |
| |
| <property> |
| <name>fs.s3.maxRetries</name> |
| <value>4</value> |
| <description>The maximum number of retries for reading or writing blocks to S3, |
| before we signal failure to the application. |
| </description> |
| </property> |
| |
| <property> |
| <name>fs.s3.sleepTimeSeconds</name> |
| <value>10</value> |
| <description>The number of seconds to sleep between each S3 retry. |
| </description> |
| </property> |
| |
| <!-- map/reduce properties --> |
| |
| <property> |
| <name>mapred.job.tracker</name> |
| <value>local</value> |
| <description>The host and port that the MapReduce job tracker runs |
| at. If "local", then jobs are run in-process as a single map |
| and reduce task. |
| </description> |
| </property> |
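| |
| <!-- A sketch for distributed operation (host and port are hypothetical): |
| |
| <property> |
| <name>mapred.job.tracker</name> |
| <value>jobtracker.example.com:9001</value> |
| </property> |
| -->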
| |
| <property> |
| <name>mapred.job.tracker.info.bindAddress</name> |
| <value>0.0.0.0</value> |
| <description> |
| The address to which the job tracker info webserver will bind. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.job.tracker.info.port</name> |
| <value>50030</value> |
| <description>The port that the MapReduce job tracker info webserver runs at. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.job.tracker.handler.count</name> |
| <value>10</value> |
| <description> |
| The number of server threads for the JobTracker. This should be roughly |
| 4% of the number of tasktracker nodes. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.task.tracker.report.bindAddress</name> |
| <value>127.0.0.1</value> |
| <description>The interface that task processes use to communicate |
| with their parent tasktracker process.</description> |
| </property> |
| |
| <property> |
| <name>mapred.local.dir</name> |
| <value>${hadoop.tmp.dir}/mapred/local</value> |
| <description>The local directory where MapReduce stores intermediate |
| data files. May be a comma-separated list of |
| directories on different devices in order to spread disk i/o. |
| Directories that do not exist are ignored. |
| </description> |
| </property> |
| |
| <property> |
| <name>local.cache.size</name> |
| <value>10737418240</value> |
| <description>The limit on the size of the cache to keep, set by default |
| to 10GB. This acts as a soft limit on the cache directory for out-of-band data. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.system.dir</name> |
| <value>${hadoop.tmp.dir}/mapred/system</value> |
| <description>The shared directory where MapReduce stores control files. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.temp.dir</name> |
| <value>${hadoop.tmp.dir}/mapred/temp</value> |
| <description>A shared directory for temporary files. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.local.dir.minspacestart</name> |
| <value>0</value> |
| <description>If the space in mapred.local.dir drops under this, |
| do not ask for more tasks. |
| Value in bytes. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.local.dir.minspacekill</name> |
| <value>0</value> |
| <description>If the space in mapred.local.dir drops under this, |
| do not ask for more tasks until all the current ones have finished and |
| cleaned up. Also, to protect the rest of the running tasks, kill one of |
| them to free up some space. Start with the reduce tasks, then with the |
| ones that have progressed the least. |
| Value in bytes. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.tasktracker.expiry.interval</name> |
| <value>600000</value> |
| <description>Expert: The time-interval, in milliseconds, after which |
| a tasktracker is declared 'lost' if it doesn't send heartbeats. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.map.tasks</name> |
| <value>2</value> |
| <description>The default number of map tasks per job. Typically set |
| to a prime several times greater than the number of available hosts. |
| Ignored when mapred.job.tracker is "local". |
| </description> |
| </property> |
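| |
| <!-- As a worked example of the guidance above: on a hypothetical 20-host |
| cluster, a prime several times the host count, such as 79, would be a |
| reasonable setting. --> |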
| |
| <property> |
| <name>mapred.reduce.tasks</name> |
| <value>1</value> |
| <description>The default number of reduce tasks per job. Typically set |
| to a prime close to the number of available hosts. Ignored when |
| mapred.job.tracker is "local". |
| </description> |
| </property> |
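| |
| <!-- Following the same guidance: for a hypothetical 20-host cluster, a |
| prime close to the host count, such as 19, fits this rule of thumb. --> |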
| |
| <property> |
| <name>mapred.map.max.attempts</name> |
| <value>4</value> |
| <description>Expert: The maximum number of attempts per map task. |
| In other words, the framework will try to execute a map task this many |
| times before giving up on it. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.reduce.max.attempts</name> |
| <value>4</value> |
| <description>Expert: The maximum number of attempts per reduce task. |
| In other words, the framework will try to execute a reduce task this many |
| times before giving up on it. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.reduce.parallel.copies</name> |
| <value>5</value> |
| <description>The default number of parallel transfers run by reduce |
| during the copy(shuffle) phase. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.task.timeout</name> |
| <value>600000</value> |
| <description>The number of milliseconds before a task will be |
| terminated if it neither reads an input, writes an output, nor |
| updates its status string. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.tasktracker.tasks.maximum</name> |
| <value>2</value> |
| <description>The maximum number of tasks that will be run |
| simultaneously by a task tracker. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.jobtracker.completeuserjobs.maximum</name> |
| <value>100</value> |
| <description>The maximum number of complete jobs per user to keep around before delegating them to the job history. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.child.java.opts</name> |
| <value>-Xmx200m</value> |
| <description>Java opts for the task tracker child processes. Subsumes |
| 'mapred.child.heap.size' (If a mapred.child.heap.size value is found |
| in a configuration, its maximum heap size will be used and a warning |
| emitted that heap.size has been deprecated). Also, the following symbol, |
| if present, will be interpolated: @taskid@ is replaced by current TaskID. |
| Any other occurrences of '@' will go unchanged. For |
| example, to enable verbose gc logging to a file named for the taskid in |
| /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: |
| -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc |
| </description> |
| </property> |
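| |
| <!-- The example from the description above, written out as an override |
| (the log path is illustrative): |
| |
| <property> |
| <name>mapred.child.java.opts</name> |
| <value>-Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc</value> |
| </property> |
| -->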
| |
| <property> |
| <name>mapred.inmem.merge.threshold</name> |
| <value>1000</value> |
| <description>The threshold, in terms of the number of files, |
| for the in-memory merge process. When we accumulate this many files, |
| we initiate the in-memory merge and spill to disk. A value of zero or |
| less means there is no threshold, and the merge is triggered solely by |
| the ramfs's memory consumption. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.speculative.execution</name> |
| <value>true</value> |
| <description>If true, then multiple instances of some map and reduce tasks |
| may be executed in parallel.</description> |
| </property> |
| |
| <property> |
| <name>mapred.min.split.size</name> |
| <value>0</value> |
| <description>The minimum size chunk that map input should be split |
| into. Note that some file formats may have minimum split sizes that |
| take priority over this setting.</description> |
| </property> |
| |
| <property> |
| <name>mapred.submit.replication</name> |
| <value>10</value> |
| <description>The replication level for submitted job files. This |
| should be around the square root of the number of nodes. |
| </description> |
| </property> |
| |
| |
| <property> |
| <name>mapred.tasktracker.dns.interface</name> |
| <value>default</value> |
| <description>The name of the Network Interface from which a task |
| tracker should report its IP address. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.tasktracker.dns.nameserver</name> |
| <value>default</value> |
| <description>The host name or IP address of the name server (DNS) |
| which a TaskTracker should use to determine the host name used by |
| the JobTracker for communication and display purposes. |
| </description> |
| </property> |
| |
| <property> |
| <name>tasktracker.http.threads</name> |
| <value>40</value> |
| <description>The number of worker threads for the http server. This is |
| used for map output fetching. |
| </description> |
| </property> |
| |
| <property> |
| <name>tasktracker.http.bindAddress</name> |
| <value>0.0.0.0</value> |
| <description> |
| The address to which the task tracker http server will bind. |
| </description> |
| </property> |
| |
| <property> |
| <name>tasktracker.http.port</name> |
| <value>50060</value> |
| <description>The default port for task trackers to use as their http server. |
| </description> |
| </property> |
| |
| <property> |
| <name>keep.failed.task.files</name> |
| <value>false</value> |
| <description>Should the files for failed tasks be kept? This should only be |
| used on jobs that are failing, because the storage is never |
| reclaimed. It also prevents the map outputs from being erased |
| from the reduce directory as they are consumed.</description> |
| </property> |
| |
| <!-- |
| <property> |
| <name>keep.task.files.pattern</name> |
| <value>.*_m_123456_0</value> |
| <description>Keep all files from tasks whose task names match the given |
| regular expression. Defaults to none.</description> |
| </property> |
| --> |
| |
| <property> |
| <name>mapred.output.compress</name> |
| <value>false</value> |
| <description>Should the job outputs be compressed? |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.output.compression.type</name> |
| <value>RECORD</value> |
| <description>If the job outputs are to be compressed as SequenceFiles, how should |
| they be compressed? Should be one of NONE, RECORD or BLOCK. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.output.compression.codec</name> |
| <value>org.apache.hadoop.io.compress.DefaultCodec</value> |
| <description>If the job outputs are compressed, how should they be compressed? |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.compress.map.output</name> |
| <value>false</value> |
| <description>Should the outputs of the maps be compressed before being |
| sent across the network? Uses SequenceFile compression. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.map.output.compression.type</name> |
| <value>RECORD</value> |
| <description>If the map outputs are to be compressed, how should they |
| be compressed? Should be one of NONE, RECORD or BLOCK. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.map.output.compression.codec</name> |
| <value>org.apache.hadoop.io.compress.DefaultCodec</value> |
| <description>If the map outputs are compressed, how should they be |
| compressed? |
| </description> |
| </property> |
| |
| <property> |
| <name>io.seqfile.compress.blocksize</name> |
| <value>1000000</value> |
| <description>The minimum block size for compression in block compressed |
| SequenceFiles. |
| </description> |
| </property> |
| |
| <property> |
| <name>io.seqfile.lazydecompress</name> |
| <value>true</value> |
| <description>Should values of block-compressed SequenceFiles be decompressed |
| only when necessary? |
| </description> |
| </property> |
| |
| <property> |
| <name>io.seqfile.sorter.recordlimit</name> |
| <value>1000000</value> |
| <description>The limit on the number of records to be kept in memory in a |
| spill in SequenceFile.Sorter. |
| </description> |
| </property> |
| |
| <property> |
| <name>io.seqfile.compression.type</name> |
| <value>RECORD</value> |
| <description>The default compression type for SequenceFile.Writer. |
| </description> |
| </property> |
| |
| <property> |
| <name>map.sort.class</name> |
| <value>org.apache.hadoop.mapred.MergeSorter</value> |
| <description>The default sort class for sorting keys. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.userlog.limit.kb</name> |
| <value>0</value> |
| <description>The maximum size of user-logs of each task in KB. 0 disables the cap. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.userlog.retain.hours</name> |
| <value>24</value> |
| <description>The maximum time, in hours, for which the user-logs are to be |
| retained. |
| </description> |
| </property> |
| |
| <property> |
| <name>mapred.hosts</name> |
| <value></value> |
| <description>Names a file that contains the list of nodes that may |
| connect to the jobtracker. If the value is empty, all hosts are |
| permitted.</description> |
| </property> |
| |
| <property> |
| <name>mapred.hosts.exclude</name> |
| <value></value> |
| <description>Names a file that contains the list of hosts that |
| should be excluded by the jobtracker. If the value is empty, no |
| hosts are excluded.</description> |
| </property> |
| |
| <property> |
| <name>mapred.max.tracker.failures</name> |
| <value>4</value> |
| <description>The number of task-failures on a tasktracker of a given job |
| after which new tasks of that job aren't assigned to it. |
| </description> |
| </property> |
| |
| <property> |
| <name>jobclient.output.filter</name> |
| <value>FAILED</value> |
| <description>The filter for controlling the output of the task's userlogs sent |
| to the console of the JobClient. |
| The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and |
| ALL. |
| </description> |
| </property> |
| |
| <!-- ipc properties --> |
| |
| <property> |
| <name>ipc.client.timeout</name> |
| <value>60000</value> |
| <description>Defines the timeout for IPC calls in milliseconds.</description> |
| </property> |
| |
| <property> |
| <name>ipc.client.idlethreshold</name> |
| <value>4000</value> |
| <description>Defines the threshold number of connections after which |
| connections will be inspected for idleness. |
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.client.maxidletime</name> |
| <value>120000</value> |
| <description>Defines the maximum idle time for a connected client after |
| which it may be disconnected. |
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.client.kill.max</name> |
| <value>10</value> |
| <description>Defines the maximum number of clients to disconnect in one go. |
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.client.connection.maxidletime</name> |
| <value>1000</value> |
| <description>The maximum time, in milliseconds, after which a client will |
| bring down the connection to the server. |
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.client.connect.max.retries</name> |
| <value>10</value> |
| <description>Indicates the number of retries a client will make to establish |
| a server connection. |
| </description> |
| </property> |
| |
| <property> |
| <name>ipc.server.listen.queue.size</name> |
| <value>128</value> |
| <description>Indicates the length of the listen queue for servers accepting |
| client connections. |
| </description> |
| </property> |
| |
| <!-- Job Notification Configuration --> |
| |
| <!-- |
| <property> |
| <name>job.end.notification.url</name> |
| <value>http://localhost:8080/jobstatus.php?jobId=$jobId&jobStatus=$jobStatus</value> |
| <description>Indicates the URL which will be called on completion of the job |
| to report the job's end status. |
| Users can include at most 2 variables in the URI: $jobId and $jobStatus. |
| If present in the URI, they will be replaced by their |
| respective values. |
| </description> |
| </property> |
| --> |
| |
| <property> |
| <name>job.end.retry.attempts</name> |
| <value>0</value> |
| <description>Indicates how many times hadoop should attempt to contact the |
| notification URL.</description> |
| </property> |
| |
| <property> |
| <name>job.end.retry.interval</name> |
| <value>30000</value> |
| <description>Indicates the time, in milliseconds, between notification URL |
| retry calls.</description> |
| </property> |
| |
| <!-- Web Interface Configuration --> |
| |
| <property> |
| <name>webinterface.private.actions</name> |
| <value>false</value> |
| <description> If set to true, the web interfaces of JT and NN may contain |
| actions, such as kill job, delete file, etc., that should |
| not be exposed to the public. Enable this option only if the interfaces |
| are reachable solely by those who have the right authorization. |
| </description> |
| </property> |
| |
| <!-- Proxy Configuration --> |
| |
| <property> |
| <name>hadoop.rpc.socket.factory.class.default</name> |
| <value>org.apache.hadoop.net.StandardSocketFactory</value> |
| <description> Default SocketFactory to use. This parameter is expected to be |
| formatted as "package.FactoryClassName". |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.rpc.socket.factory.class.ClientProtocol</name> |
| <value></value> |
| <description> SocketFactory to use to connect to a DFS. If null or empty, use |
| hadoop.rpc.socket.factory.class.default. This socket factory is also used by |
| DFSClient to create sockets to DataNodes. |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.rpc.socket.factory.class.JobSubmissionProtocol</name> |
| <value></value> |
| <description> SocketFactory to use to connect to a Map/Reduce master |
| (JobTracker). If null or empty, then use hadoop.rpc.socket.factory.class.default. |
| </description> |
| </property> |
| |
| <property> |
| <name>hadoop.socks.server</name> |
| <value></value> |
| <description> Address (host:port) of the SOCKS server to be used by the |
| SocksSocketFactory. |
| </description> |
| </property> |
| |
| </configuration> |